- data1))
"""
def __init__(self, in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True,
flag_split = False,
flag_final_block=False,
split_dim = 2,
flag_affine_block_legacy=False):
"""WaveGlowBlock(in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_split=False, flag_final_block=False,
split_dim=2, flag_affine_block_legacy=False)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim: int, conditional feature dim, (batch, length, cond_dim)
n_flow_steps: int, number of flow steps in one block
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether to use affine (True) or additive (False) transformation
default True
flag_split: bool, whether to split the output z for the multi-scale structure
default False
flag_final_block: bool, whether this block is the final block
default False
split_dim: int, if flag_split==True, z[:, :, :split_dim] will be
extracted, z[:, :, split_dim:] can be used for the next
WaveGlowBlock
flag_affine_block_legacy: bool, whether to use the legacy implementation
of the wavenet-based affine transformation layer
default False
For wn_dim_channel and wn_kernel_size, see AffineCouplingWaveGlow
For flag_affine, see AffineCouplingWaveGlow
"""
super(WaveGlowBlock, self).__init__()
tmp_flows = []
for i in range(n_flow_steps):
tmp_flows.append(
FlowStepWaveGlow(
in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_affine_block_legacy))
self.m_flows = torch_nn.ModuleList(tmp_flows)
self.flag_split = flag_split
self.flag_final_block = flag_final_block
self.split_dim = split_dim
if self.flag_split and self.flag_final_block:
print("WaveGlowBlock: flag_split and flag_final_block are True")
print("This is unexpected. Please check model definition")
sys.exit(1)
if self.flag_split and self.split_dim <= 0:
print("WaveGlowBlock: split_dim should be > 0")
sys.exit(1)
return
def forward(self, y, cond, factor=1):
"""x, z, log_detjac = WaveGlowBlock(y)
y -> H() -> [z, x], log_det_jacobian
H() consists of multiple flow steps (1x1conv + AffineCoupling)
input
-----
y: tensor, (batch, length, dim)
cond, tensor, (batch, length, cond_dim)
factor: None or int, used to divide the likelihood, default 1
output
------
log_detjac: tensor or scalar
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None, no input to the next block
z: tensor, (batch, length, dim), for N(z; 0, I)
else:
x: tensor, (batch, length, dim),
z: None, no latent for N(z; 0, I) from this block
the concatenation [x, z] should have the same size as y
"""
# flows
log_detjac = 0
x_tmp = y
for l_flow in self.m_flows:
x_tmp, log_detjac_tmp = l_flow(x_tmp, cond, factor)
log_detjac = log_detjac + log_detjac_tmp
if self.flag_split:
z = x_tmp[:, :, :self.split_dim]
x = x_tmp[:, :, self.split_dim:]
else:
if self.flag_final_block:
z = x_tmp
x = None
else:
z = None
x = x_tmp
return x, z, log_detjac
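# Shape bookkeeping (illustrative example, not from the original code):
# with in_dim=8, split_dim=2 and flag_split=True, z is (batch, length, 2)
# and x is (batch, length, 6); concatenating [z, x] along the last dimension
# recovers the original in_dim=8 expected by reverse().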
def reverse(self, x, z, cond):
"""y = WaveGlowBlock.reverse(x, z, cond)
[z, x] -> H^{-1}() -> y
input
-----
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None
z: tensor, (batch, length, in_dim)
else:
x: tensor, (batch, length, in_dim)
z: None
output
------
y: tensor, (batch, length, in_dim)
"""
if self.flag_split:
if x is None or z is None:
print("WaveGlowBlock.reverse: x and z should not be None")
sys.exit(1)
y_tmp = torch.cat([z, x], dim=-1)
else:
if self.flag_final_block:
if z is None:
print("WaveGlowBlock.reverse: z should not be None")
sys.exit(1)
y_tmp = z
else:
if x is None:
print("WaveGlowBlock.reverse: x should not be None")
sys.exit(1)
y_tmp = x
for l_flow in self.m_flows[::-1]:
# affine
y_tmp = l_flow.reverse(y_tmp, cond)
return y_tmp
class WaveGlow(torch_nn.Module):
"""WaveGlow
Example
cond_dim = 4
upsample = 80
num_blocks = 4
num_flows_inblock = 5
wn_num_conv1d = 8
wn_dim_channel = 512
wn_kernel_size = 3
# waveforms of length 1600
wave1 = torch.randn([2, 1600, 1])
# condition feature
cond = torch.randn([2, 1600//upsample, cond_dim])
# model
m_model = nii_waveglow.WaveGlow(
cond_dim, upsample,
num_blocks, num_flows_inblock, wn_num_conv1d,
wn_dim_channel, wn_kernel_size)
# forward computation, neg_log = -(logp + log_detjac)
# neg_log.backward() can be used for backward
z, neg_log, logp, log_detjac = m_model(wave1, cond)
# recover the signal
wave2 = m_model.reverse(z, cond)
# check difference between original wave and recovered wave
print(torch.std(wave1 - wave2))
"""
def __init__(self, cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False):
"""WaveGlow(cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False)
Args
----
cond_dim: int, conditional feature dim, (batch, length, cond_dim)
upsample_rate: int, up-sampling rate for condition features
num_blocks: int, number of WaveGlowBlocks
num_flows_inblock: int, number of flow steps in one WaveGlowBlock
wn_num_conv1d: int, number of 1D-conv WaveNet blocks in each flow step
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether to use affine (True) or additive (False) transformation
default True
early_hid_dim: int, dimension for z_1, z_2 ... , default 2
flag_affine_block_legacy: bool, whether to use the legacy implementation
of the wavenet-based affine transformation layer
default False. The difference is in the WaveNet part;
please see AffineCouplingWaveGlow and
AffineCouplingWaveGlow_legacy for details
This model defines:
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
z_1, z_2, ... are the extracted z from a multi-scale flow structure
concat([z_1, z_2, z]) is expected to be white Gaussian noise
If early_hid_dim == 0, z_1 and z_2 will not be extracted
"""
super(WaveGlow, self).__init__()
# input is assumed to be waveform
self.m_input_dim = 1
self.m_early_hid_dim = early_hid_dim
# squeeze layer
self.m_squeeze = SqueezeForWaveGlow()
# up-sampling layer
#self.m_upsample = nii_nn.UpSampleLayer(cond_dim, upsample_rate, True)
self.m_upsample = upsampleByTransConv(cond_dim, upsample_rate)
# wavenet-based flow blocks
# squeezed input dimension
squeezed_in_dim = self.m_input_dim * self.m_squeeze.get_squeeze_factor()
# squeezed condition feature dimension
squeezed_cond_dim = cond_dim * self.m_squeeze.get_squeeze_factor()
# save the dimension for get_z_noises
self.m_feat_dim = []
# define blocks
tmp_squeezed_in_dim = squeezed_in_dim
tmp_flow_blocks = []
for i in range(num_blocks):
# if this is not the last block and early_hid_dim >0
flag_split = (i < (num_blocks-1)) and early_hid_dim > 0
flag_final_block = i == (num_blocks-1)
# save the dimension for get_z_noises
if flag_final_block:
self.m_feat_dim.append(tmp_squeezed_in_dim)
else:
self.m_feat_dim.append(early_hid_dim if flag_split else 0)
tmp_flow_blocks.append(
WaveGlowBlock(
tmp_squeezed_in_dim, squeezed_cond_dim, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_split = flag_split, flag_final_block=flag_final_block,
split_dim = early_hid_dim,
flag_affine_block_legacy = flag_affine_block_legacy))
# multi-scale approach will extract a few dimensions for next flow
# thus, input dimension to the next block will be this
tmp_squeezed_in_dim = tmp_squeezed_in_dim - early_hid_dim
self.m_flowblocks = torch_nn.ModuleList(tmp_flow_blocks)
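# Dimension bookkeeping (illustrative example, assuming squeezed_in_dim=8,
# num_blocks=4, early_hid_dim=2): block input dims are 8 -> 6 -> 4 -> 2 and
# m_feat_dim = [2, 2, 2, 2], so the extracted z_1, z_2, z_3 plus the final z
# together cover all 8 squeezed dimensions.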
# done
return
def _normal_lh(self, noise):
# element-wise log-likelihood of a standard normal distribution on the given noise
return -0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
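# e.g., _normal_lh(torch.zeros(1)) returns -0.5 * log(2 * pi) ~= -0.9189,
# the element-wise log-density of a standard Gaussian (no sum/mean reduction)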
def forward(self, y, cond):
"""z, neg_logp_y, logp_z, logdet = WaveGlow.forward(y, cond)
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
input
-----
y: tensor, (batch, waveform_length, 1)
cond: tensor, (batch, cond_length, cond_dim)
output
------
z: list of tensors, [z_1, z_2, ... ,z ] in figure above
neg_logp_y: scalar, -log p(y)
logp_z: scalar, log N(z), summed over one data sequence but
averaged over batch
logdet: scalar, log |det dH(.)/dy|, summed over one data sequence but
averaged over batch
If self.early_hid_dim == 0, z_1, z_2 ... will be None
"""
# Rather than summing the likelihood and dividing it by the number of
# data points in the final step, we divide the likelihood computed at
# each flow step by this factor and sum the scaled likelihoods.
# The two methods are equivalent, but the latter may prevent numerical
# overflow of the likelihood value for long sequences
factor = np.prod([dim for dim in y.shape])
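# e.g., for the example waveform of shape (2, 1600, 1) in the class docstring,
# factor = 2 * 1600 * 1 = 3200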
# waveform squeeze (batch, squeezed_length, squeezed_dim)
y_squeezed = self.m_squeeze(y)
squeezed_dim = y_squeezed.shape[-1]
# condition feature upsampling and
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="PlanUnitIntendedUse",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Plan unit intended use",
"verbose_name_plural": "Plan unit intended uses",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="PlanUnitState",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Plan unit state",
"verbose_name_plural": "Plan unit states",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="PlanUnitType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Plan unit type",
"verbose_name_plural": "Plan unit types",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="PlotDivisionState",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Plot divisions state",
"verbose_name_plural": "Plot division states",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="ReceivableType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
(
"sap_material_code",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="SAP material code",
),
),
(
"sap_order_item_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="SAP order item number",
),
),
],
options={
"verbose_name": "Receivable type",
"verbose_name_plural": "Receivable types",
},
),
migrations.CreateModel(
name="Regulation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Form of regulation",
"verbose_name_plural": "Forms of regulation",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="Rent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"type",
enumfields.fields.EnumField(
enum=leasing.enums.RentType, max_length=30, verbose_name="Type"
),
),
(
"cycle",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.RentCycle,
max_length=30,
null=True,
verbose_name="Cycle",
),
),
(
"index_type",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.IndexType,
max_length=30,
null=True,
verbose_name="Index type",
),
),
(
"due_dates_type",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.DueDatesType,
max_length=30,
null=True,
verbose_name="Due dates type",
),
),
(
"due_dates_per_year",
models.PositiveIntegerField(
blank=True, null=True, verbose_name="Due dates per year"
),
),
(
"elementary_index",
models.PositiveIntegerField(
blank=True, null=True, verbose_name="Elementary index"
),
),
(
"index_rounding",
models.PositiveIntegerField(
blank=True, null=True, verbose_name="Index rounding"
),
),
(
"x_value",
models.PositiveIntegerField(
blank=True, null=True, verbose_name="X value"
),
),
(
"y_value",
models.PositiveIntegerField(
blank=True, null=True, verbose_name="Y value"
),
),
(
"y_value_start",
models.DateField(
blank=True, null=True, verbose_name="Y value start date"
),
),
(
"equalization_start_date",
models.DateField(
blank=True, null=True, verbose_name="Equalization start date"
),
),
(
"equalization_end_date",
models.DateField(
blank=True, null=True, verbose_name="Equalization end date"
),
),
(
"amount",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Amount",
),
),
("note", models.TextField(blank=True, null=True, verbose_name="Note")),
(
"start_date",
models.DateField(blank=True, null=True, verbose_name="Start date"),
),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
(
"seasonal_start_day",
models.PositiveIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(31),
],
verbose_name="Seasonal start day",
),
),
(
"seasonal_start_month",
models.PositiveIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(12),
],
verbose_name="Seasonal start month",
),
),
(
"seasonal_end_day",
models.PositiveIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(31),
],
verbose_name="Seasonal end day",
),
),
(
"seasonal_end_month",
models.PositiveIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(12),
],
verbose_name="Seasonal end month",
),
),
(
"manual_ratio",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Manual ratio",
),
),
(
"manual_ratio_previous",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Manual ratio (previous)",
),
),
(
"lease",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="rents",
to="leasing.Lease",
verbose_name="Lease",
),
),
],
options={"verbose_name": "Rent", "verbose_name_plural": "Rents"},
),
migrations.CreateModel(
name="RentAdjustment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"type",
enumfields.fields.EnumField(
enum=leasing.enums.RentAdjustmentType,
max_length=30,
verbose_name="Type",
),
),
(
"start_date",
models.DateField(blank=True, null=True, verbose_name="Start date"),
),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
(
"full_amount",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Full amount",
),
),
(
"amount_type",
enumfields.fields.EnumField(
enum=leasing.enums.RentAdjustmentAmountType,
max_length=30,
verbose_name="Amount type",
),
),
(
"amount_left",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Amount left",
),
),
("note", models.TextField(blank=True, null=True, verbose_name="Note")),
(
"subvention_type",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.SubventionType,
max_length=30,
null=True,
verbose_name="Subvention type",
),
),
(
"subvention_base_percent",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Subvention base percent",
),
),
(
"subvention_graduated_percent",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Graduated subvention percent",
),
),
(
"decision",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="leasing.Decision",
verbose_name="Decision",
),
),
],
options={
"verbose_name": "Rent adjustment",
"verbose_name_plural": "Rent adjustments",
},
),
migrations.CreateModel(
name="RentIntendedUse",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Rent intended use",
"verbose_name_plural": "Rent intended uses",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="ReservationProcedure",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Reservation procedure",
"verbose_name_plural": "Reservation Procedures",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="SpecialProject",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Special project",
"verbose_name_plural": "Special projects",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="StatisticalUse",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Statistical use",
"verbose_name_plural": "Statistical uses",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="SupportiveHousing",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Supportive housing",
"verbose_name_plural": "Supportive housings",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="Tenant",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"share_numerator",
models.PositiveIntegerField(verbose_name="Numerator"),
),
(
"share_denominator",
models.PositiveIntegerField(verbose_name="Denominator"),
),
(
"reference",
models.CharField(
blank=True, max_length=35, null=True, verbose_name="Reference"
),
),
],
options={"verbose_name": "Tenant", "verbose_name_plural": "Tenants"},
),
migrations.CreateModel(
name="Vat",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"percent",
models.IntegerField(
validators=[
django.core.validators.MinValueValidator(0),
django.core.validators.MaxValueValidator(100),
],
verbose_name="Percent",
),
),
("start_date", models.DateField(verbose_name="Start date")),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
],
options={
"verbose_name": "VAT",
"verbose_name_plural": "VATs",
"ordering": ("-start_date",),
},
),
migrations.CreateModel(
name="UiData",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
("key", models.CharField(max_length=255, verbose_name="Key")),
("value", models.TextField(verbose_name="Value")),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to=settings.AUTH_USER_MODEL,
verbose_name="User",
),
),
],
options={
"verbose_name": "UI Datum",
"verbose_name_plural": "UI Data",
"permissions": (
(
"edit_global_ui_data",
"Can create, edit and delete global UI data",
),
),
},
),
migrations.CreateModel(
name="TenantRentShare",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"share_numerator",
models.PositiveIntegerField(verbose_name="Rent share numerator"),
),
(
"share_denominator",
models.PositiveIntegerField(verbose_name="Rent share denominator"),
),
(
"intended_use",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="leasing.RentIntendedUse",
verbose_name="Intended use",
),
),
(
"tenant",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="rent_shares",
to="leasing.Tenant",
verbose_name="Tenant",
),
),
],
options={
"verbose_name": "Tenant rent share",
"verbose_name_plural": "Tenant rent shares",
},
),
migrations.CreateModel(
name="TenantContact",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"type",
enumfields.fields.EnumField(
enum=leasing.enums.TenantContactType, max_length=255
),
),
("start_date", models.DateField(verbose_name="Start date")),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
(
"contact",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="leasing.Contact",
verbose_name="Contact",
),
),
(
"tenant",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="leasing.Tenant",
verbose_name="Tenant",
),
),
],
options={
"verbose_name": "Tenant contact",
"verbose_name_plural": "Tenant contacts",
},
),
migrations.AddField(
model_name="tenant",
name="contacts",
field=models.ManyToManyField(
related_name="tenants",
through="leasing.TenantContact",
to="leasing.Contact",
),
),
migrations.AddField(
model_name="tenant",
name="lease",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="tenants",
to="leasing.Lease",
verbose_name="Lease",
),
),
migrations.CreateModel(
name="TemporarySubvention",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"description",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Description",
),
),
(
"subvention_percent",
models.DecimalField(
decimal_places=2,
max_digits=10,
verbose_name="Subvention percent",
),
),
(
"rent_adjustment",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="temporary_subventions",
to="leasing.RentAdjustment",
verbose_name="Rent adjustment",
),
),
],
options={
"verbose_name": "Temporary subvention (Rent adjustment)",
"verbose_name_plural": "Temporary subventions (Rent adjustment)",
},
),
migrations.CreateModel(
name="RentDueDate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"day",
models.IntegerField(
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(31),
],
verbose_name="Day",
),
),
(
"month",
models.IntegerField(
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(12),
],
verbose_name="Month",
),
),
(
"rent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="due_dates",
to="leasing.Rent",
verbose_name="Rent",
),
),
],
options={
"verbose_name": "Rent due date",
"verbose_name_plural": "Rent due dates",
},
),
migrations.AddField(
model_name="rentadjustment",
name="intended_use",
field=models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="+",
to="leasing.RentIntendedUse",
verbose_name="Intended use",
),
),
migrations.AddField(
model_name="rentadjustment",
name="rent",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="rent_adjustments",
to="leasing.Rent",
verbose_name="Rent",
),
),
migrations.CreateModel(
name="RelatedLease",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"type",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.LeaseRelationType,
max_length=30,
null=True,
verbose_name="Lease relation type",
),
),
(
import json
import mock
from werkzeug.routing import Rule
from doctor.docs import base
from doctor.resource import ResourceAnnotation
from doctor.response import Response
from .base import TestCase
from .types import (
Age, AgeOrColor, Auth, Color, Colors, ExampleArray, ExampleObject,
ExampleObjects, ExampleObjectsAndAge, FooInstance, IsAlive, IsDeleted, Name,
TwoItems)
from .utils import add_doctor_attrs
class TestDocsBase(TestCase):
def test_prefix_lines_bytes(self):
"""
This is a regression test where the response was a bytes instance.
"""
lines = b'"Notes API v1.0.0"'
prefix = ' '
expected = [' "Notes API v1.0.0"']
assert expected == base.prefix_lines(lines, prefix)
def test_get_example_lines_json(self):
"""Tests an example when the response is valid JSON."""
headers = {'GeoIp-Country-Code': 'US'}
lines = base.get_example_lines(headers, 'GET', 'http://example.com/',
{}, json.dumps({'foo': 1, 'bar': 2}))
assert lines == [
'',
'Example Request:',
'',
'.. code-block:: bash',
'',
' curl http://example.com/ -X GET -H \'GeoIp-Country-Code: US\'',
'',
'Example Response:',
'',
'.. code-block:: json',
'',
' {',
' "bar": 2,',
' "foo": 1',
' }',
]
def test_get_example_lines_text(self):
"""Tests an example when the response is *not* valid JSON."""
lines = base.get_example_lines({}, 'GET', 'http://example.com/', {},
'hello, world!')
assert lines == [
'',
'Example Request:',
'',
'.. code-block:: bash',
'',
' curl http://example.com/ -X GET',
'',
'Example Response:',
'',
'.. code-block:: text',
'',
' hello, world!',
]
def test_get_json_object_lines_for_request_with_enum(self):
def mock_logic(auth: Auth, is_alive: IsAlive, name: Name=None,
color: Color='blue'):
pass
mock_logic = add_doctor_attrs(mock_logic)
annotation = ResourceAnnotation(mock_logic, 'GET')
parameters = annotation.logic._doctor_signature.parameters
properties = {k: p.annotation for k, p in parameters.items()}
result = base.get_json_object_lines(
annotation, properties, field='>json', url_params=[],
request=True)
assert result == [
':>json str auth: **Required**. auth token',
':>json bool is_alive: **Required**. Is alive?',
(":>json str color: Color Must be one of: `['blue', 'green']` "
"(case-insensitive). (Defaults to `blue`) "),
':>json str name: name (Defaults to `None`) ']
def test_get_json_object_lines_for_request(self):
"""
This tests that when the request kwarg is True that any
required params have the description prefixed with
**Required** and sorted in alphabetical order, followed by
any optional parameters in alphabetical order.
"""
def mock_logic(auth: Auth, age: Age, is_deleted: IsDeleted=True):
pass
mock_logic = add_doctor_attrs(mock_logic)
annotation = ResourceAnnotation(mock_logic, 'GET')
parameters = annotation.logic._doctor_signature.parameters
properties = {k: p.annotation for k, p in parameters.items()}
url_params = ['age']
result = base.get_json_object_lines(
annotation, properties, field='>json', url_params=url_params,
request=True)
assert result == [
':param int age: **Required**. age',
':>json str auth: **Required**. auth token',
(':>json bool is_deleted: Indicates if the item should be marked '
'as deleted (Defaults to `True`) '),
]
def test_get_json_object_lines_object_response(self):
"""
This tests that when our response is an object that we return
all of its documented properties.
"""
def mock_logic() -> ExampleObject:
pass
mock_logic = add_doctor_attrs(mock_logic)
annotation = ResourceAnnotation(mock_logic, 'GET')
result = base.get_json_lines(
annotation, field='>json', route='/foo', request=False)
expected = [
':>json str str: auth token'
]
assert expected == result
def test_get_json_lines_logic_defines_req_obj_type(self):
"""
This tests that we properly generate the json params for a request
when the logic function defines a `req_obj_type`.
"""
def mock_logic(foo: FooInstance):
pass
mock_logic = add_doctor_attrs(mock_logic, req_obj_type=FooInstance)
annotation = ResourceAnnotation(mock_logic, 'POST')
result = base.get_json_lines(
annotation, field='<json', route='/foo', request=True)
expected = [
':<json int foo_id: **Required**. foo id',
':<json str foo: foo'
]
assert expected == result
def test_get_json_lines_array_response(self):
"""
Verifies we document properties of an array of objects.
"""
def mock_logic() -> ExampleObjects:
pass
mock_logic = add_doctor_attrs(mock_logic)
annotation = ResourceAnnotation(mock_logic, 'GET')
result = base.get_json_lines(
annotation, field='<json', route='/foo')
assert result == [':<jsonarr str str: auth token']
def test_get_json_lines_response_response(self):
"""
Verifies when our response is a doctor.response.Response instance
and it has a type associated with it that we use that type to
document it.
"""
def mock_logic() -> Response[ExampleObject]:
pass
mock_logic = add_doctor_attrs(mock_logic)
annotation = ResourceAnnotation(mock_logic, 'GET')
result = base.get_json_lines(
annotation, field='>json', route='/foo', request=False)
expected = [
':>json str str: auth token'
]
assert expected == result
def test_get_name(self):
mock_class = mock.Mock()
mock_class.__module__ = 'foo.bar'
mock_class.__name__ = 'baz'
assert base.get_name(mock_class) == 'foo.bar.baz'
mock_class.__module__ = '__builtin__'
assert base.get_name(mock_class) == 'baz'
class TestDocsBaseHarness(TestCase):
def test_init(self):
harness = base.BaseHarness('http://foo/')
assert harness.url_prefix == 'http://foo'
def test_get_annotation_heading_doctor_heading(self):
"""
This test verifies we use the _doctor_heading attribute of the
handler if it is present.
"""
handler = mock.Mock(_doctor_heading='Test Title')
route = '^foo/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
assert 'Test Title' == actual
def test_get_annotation_heading_class_path(self):
"""
This test verifies that if the class path has a resource name in it,
we use it for the heading.
e.g. class <api.handlers.foo_bar.FooListHandler> becomes `Foo Bar`
"""
handler = mock.Mock(spec_set=base.BaseHarness)
handler.__str__ = mock.Mock(
return_value='<class api.foo_bar.FooBarListHandler>')
route = '^foo_bar/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
expected = 'Foo Bar'
assert expected == actual
def test_get_annotation_heading_generic_handlers(self):
"""
This test verifies that if our handlers are not in their own resource
modules, we get the heading from the handler class name.
e.g. class <api.handlers.handlers.FooBarListHandler> becomes `Foo Bar`
"""
handler = mock.Mock(spec_set=base.BaseHarness)
handler.__str__ = mock.Mock(
return_value='<class api.handlers.FooBarListHandler>')
route = '^foo_bar/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
expected = 'Foo Bar'
assert expected == actual
def test_get_annotation_heading_class_name_only(self):
"""
This test verifies that if our handler has no module path and is just
the class name, we get the heading from the class name.
e.g. class <FooBarListHandler> becomes `Foo Bar`
"""
handler = mock.Mock(spec_set=base.BaseHarness)
handler.__str__ = mock.Mock(
return_value='<class FooBarListHandler>')
route = '^foo_bar/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
expected = 'Foo Bar'
assert expected == actual
def test_get_annotation_heading_class_path_internal(self):
"""
This test verifies the path where the class path has the resource
name in it and it's an internal route.
"""
handler = mock.Mock(spec_set=base.BaseHarness)
handler.__str__ = mock.Mock(
return_value='<class api.foo_bar.InternalFooBarListHandler>')
route = '^internal/r/foo_bar/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
expected = 'Foo Bar (Internal)'
assert expected == actual
@mock.patch('doctor.docs.base.hasattr')
def test_get_annotation_heading_class_name_only_internal(self, mock_has):
"""
This test verifies the case where our handler has no module path, just
the class name, and that class is internal.
"""
mock_has.return_value = False
handler = mock.MagicMock(spec_set=base.BaseHarness)
handler.__str__ = mock.Mock(
return_value='<class InternalFooBarListHandler>')
route = '^/internal/foo_bar/?$'
harness = base.BaseHarness('http://foo/')
actual = harness._get_annotation_heading(handler, route)
expected = 'Foo Bar (Internal)'
assert expected == actual
def test_get_example_values_get_http_method_with_list_and_dict_vals(self):
"""
This test verifies that if the route we are generating example values for
is a GET endpoint and the example values are lists or dicts, we
json.dumps them when they are returned. If the HTTP method is any
other method, they will be returned as normal lists or dicts.
"""
def mock_logic(e: ExampleArray, f: ExampleObject):
pass
mock_logic = add_doctor_attrs(mock_logic)
route = Rule('/foo/bar/')
annotation = ResourceAnnotation(mock_logic, 'GET')
harness = base.BaseHarness('http://foo/bar/')
example_values = harness._get_example_values(route, annotation)
expected = {
'e': json.dumps(['ex', 'array']),
'f': json.dumps({'str': 'ex str'}),
}
assert expected == example_values
# Change the http method to something other than GET, they should
# not be json dumped.
annotation.http_method = 'POST'
example_values = harness._get_example_values(route, annotation)
expected = {
'e': ['ex', 'array'],
'f': {'str': 'ex str'},
}
assert expected == example_values
def test_get_example_values_when_logic_defines_req_obj_type(self):
"""
This tests that we generate example values appropriately when the
route defines a req_obj_type, which will pass all request params as
that object instance to the logic function.
If a req_obj_type was not defined for the logic, it would expect
the json body to look like:
{
"foo": {
"foo": "foo",
"foo_id": 1
}
}
Defining a req_obj_type tells the code that the request body should
contain those attributes rather than a sub-key within the request.
"""
def mock_logic(foo: FooInstance):
pass
mock_logic = add_doctor_attrs(mock_logic, req_obj_type=FooInstance)
route = Rule('/foo/bar/')
annotation = ResourceAnnotation(mock_logic, 'POST')
harness = base.BaseHarness('http://foo/bar/')
example_values = harness._get_example_values(route, annotation)
expected = {
'foo': 'foo',
'foo_id': 1,
}
assert expected == example_values
def test_class_name_to_resource_name(self):
tests = (
# (input, expected)
('Foo', 'Foo'),
('FooBar', 'Foo Bar'),
('FooV1Bar', 'Foo V1 Bar'),
('Reallylongnamewithnoothercase', 'Reallylongnamewithnoothercase'),
('HTTPResponse', 'HTTP Response'),
)
for arg, expected in tests:
assert expected == base.class_name_to_resource_name(arg)
@mock.patch.dict('doctor.docs.base.ALL_RESOURCES',
{'An Object': ExampleObject})
def test_get_resource_object_doc_lines(self):
actual = base.get_resource_object_doc_lines()
expected = [
'Resource Objects',
'----------------',
'.. _resource-an-object:',
'',
'An Object',
'#########',
'ex description f',
'',
'Attributes',
'**********',
'* **str** (*str*) - auth token',
'',
'Example',
'*******',
'.. code-block:: json',
'',
' {',
' "str": "ex str"',
' }'
]
assert expected == actual
@mock.patch.dict('doctor.docs.base.ALL_RESOURCES', {})
def test_get_resource_object_doc_lines_no_resources(self):
"""
This test verifies if we have no
# -*- coding: utf-8 -*-
"""Unit test for the actions module.
Copyright (c) 2017 carlosperate https://github.com/carlosperate/
Licensed under the Apache License, Version 2.0 (the "License"):
http://www.apache.org/licenses/LICENSE-2.0
"""
from __future__ import unicode_literals, absolute_import, print_function
import os
import gc
import sys
import shutil
import codecs
import unittest
# Python 2 and 3 compatibility imports
try:
from mock import patch, MagicMock
except ImportError:
from unittest.mock import patch, MagicMock
# This package modules
try:
import ardublocklyserver.actions as actions
except ImportError:
file_dir = os.path.dirname(os.path.realpath(__file__))
package_dir = os.path.dirname(os.path.dirname(file_dir))
sys.path.insert(0, package_dir)
import ardublocklyserver.actions as actions
from ardublocklyserver.compilersettings import ServerCompilerSettings
class ActionsTestCase(unittest.TestCase):
"""Tests for actions module."""
settings = None
temp_folder = None
#
# Test fixtures
#
@classmethod
def setUpClass(cls):
"""Create a temporary folder to play round."""
cls.temp_folder = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'TestTemp_actions')
if os.path.isdir(cls.temp_folder):
raise Exception('Directory %s already exists.' % cls.temp_folder)
os.makedirs(cls.temp_folder)
# Create settings file and check it's a new instance by looking at path
cls.settings = ServerCompilerSettings(cls.temp_folder)
if cls.temp_folder not in cls.settings.get_settings_file_path():
raise Exception('Settings file not created in temp folder:\n'
'\t%s' % cls.settings.get_settings_file_path())
@classmethod
def tearDownClass(cls):
"""Deletes the previously created temporary folder."""
cls.settings._drop()
del cls.settings
cls.settings = None
gc.collect()
if os.path.isdir(cls.temp_folder):
shutil.rmtree(cls.temp_folder)
def setUp(self):
"""Ensure the temp folder and settings file exists."""
if not os.path.isdir(self.__class__.temp_folder):
os.makedirs(self.__class__.temp_folder)
# Create settings file in the TestTemp_actions folder
self.settings.set_default_settings()
def tearDown(self):
"""Delete the temp folder and any files created inside."""
if os.path.isdir(self.__class__.temp_folder):
shutil.rmtree(self.__class__.temp_folder)
#
# Command line tests
#
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_open(
self, mock_settings, mock_popen, mock_isfile):
"""
Tests that a compiler path and arduino sketch path can be set
and that a command line can be launched to open the sketch in the
Arduino IDE.
"""
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'whatever/arduino'
mock_settings.return_value.compiler_dir = compiler_dir
mock_popen.return_value.communicate.return_value = ('out1', 'out2')
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'open'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with([compiler_dir, sketch_path], shell=False)
self.assertTrue(success)
self.assertEqual(ide_mode, 'open')
self.assertNotEqual(std_out, 'out1')
self.assertNotEqual(err_out, 'out2')
self.assertEqual(exit_code, 0)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_verify(
self, mock_settings, mock_popen, mock_isfile):
"""
Tests that a compiler path and arduino sketch path can be set
and that a command line can be launched to open the sketch in the
Arduino IDE.
"""
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'whatever/arduino'
board_flag = 'whatever:flag'
mock_settings.return_value.compiler_dir = compiler_dir
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value =\
board_flag
mock_popen.return_value.communicate.return_value =\
('out1'.encode('latin-1'), 'out2'.encode('latin-1'))
mock_popen.return_value.returncode = 0
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'verify'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(
[compiler_dir, sketch_path, '--board', board_flag, '--verify'],
shell=False, stderr=-1, stdout=-1)
self.assertTrue(success)
self.assertEqual(ide_mode, 'verify')
self.assertEqual(std_out, 'out1')
self.assertEqual(err_out, 'out2')
self.assertEqual(exit_code, 0)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_upload(
self, mock_settings, mock_popen, mock_isfile):
"""
Tests that a compiler path and arduino sketch path can be set
and that a command line can be launched to open the sketch in the
Arduino IDE.
"""
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'whatever/arduino'
board_flag = 'whatever:flag'
port = 'whatever_port'
mock_settings.return_value.compiler_dir = compiler_dir
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value = \
board_flag
mock_settings.return_value.get_serial_port_flag = MagicMock()
mock_settings.return_value.get_serial_port_flag.return_value = port
mock_popen.return_value.communicate.return_value = \
('out1'.encode('latin-1'), 'out2'.encode('latin-1'))
mock_popen.return_value.returncode = 0
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'upload'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(
[compiler_dir, sketch_path, '--upload', '--port', port, '--board',
board_flag], shell=False, stderr=-1, stdout=-1)
self.assertTrue(success)
self.assertEqual(ide_mode, 'upload')
self.assertEqual(std_out, 'out1')
self.assertEqual(err_out, 'out2')
self.assertEqual(exit_code, 0)
@unittest.skip('Need to solve multiple locale std out issue first')
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_upload_unicode(
self, mock_settings, mock_popen, mock_isfile):
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'いろはにほへとちり'
board_flag = 'whatever:flag'
port = 'whatever_port'
mock_settings.return_value.compiler_dir = compiler_dir
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value = \
board_flag
mock_settings.return_value.get_serial_port_flag = MagicMock()
mock_settings.return_value.get_serial_port_flag.return_value = port
mock_popen.return_value.communicate.return_value = \
('Γαζέες καὶ μυρτιὲς', 'Âne ex aéquo au whist')
mock_popen.return_value.returncode = 0
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'upload'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
mock_popen.assert_called_with(
[compiler_dir, sketch_path, '--upload', '--port', port, '--board',
board_flag], shell=False, stderr=-1, stdout=-1)
self.assertTrue(success)
self.assertEqual(ide_mode, 'upload')
self.assertEqual(std_out, 'out1')
self.assertEqual(err_out, 'out2')
self.assertEqual(exit_code, 0)
def test_load_arduino_cli_sketch_path_invalid(self):
invalid_sketch_path = os.path.join(self.temp_folder, 'bad.ino')
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(invalid_sketch_path)
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertEqual(std_out, '')
self.assertTrue(invalid_sketch_path in err_out)
self.assertEqual(exit_code, 52)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_compiler_dir_invalid(
self, mock_settings, mock_isfile):
mock_settings.return_value.compiler_dir = None
mock_isfile.return_value = True
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(os.path.join(self.temp_folder, 's.ino'))
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertEqual(std_out, '')
self.assertEqual(exit_code, 53)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_load_ide_invalid(
self, mock_settings, mock_isfile):
mock_settings.return_value.compiler_dir = 'compiler_dir'
mock_settings.return_value.load_ide_option = None
mock_isfile.return_value = True
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(os.path.join(self.temp_folder, 's.ino'))
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertEqual(std_out, '')
self.assertEqual(exit_code, 54)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_board_flag_invalid(
self, mock_settings, mock_isfile):
mock_isfile.return_value = True
mock_settings.return_value.compiler_dir = 'compiler_dir'
mock_settings.return_value.load_ide_option = 'upload'
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value = None
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(os.path.join(self.temp_folder, 's.ino'))
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertEqual(std_out, '')
self.assertEqual(exit_code, 56)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_port_flag_invalid(
self, mock_settings, mock_isfile):
mock_isfile.return_value = True
mock_settings.return_value.compiler_dir = 'compiler_dir'
mock_settings.return_value.load_ide_option = 'upload'
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value = 'avr'
mock_settings.return_value.get_serial_port_flag = MagicMock()
mock_settings.return_value.get_serial_port_flag.return_value = None
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(os.path.join(self.temp_folder, 's.ino'))
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertEqual(std_out, '')
self.assertEqual(exit_code, 55)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_exit_code_error(
self, mock_settings, mock_popen, mock_isfile):
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'whatever/arduino'
board_flag = 'whatever:flag'
mock_settings.return_value.compiler_dir = compiler_dir
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value =\
board_flag
mock_popen.return_value.communicate.return_value =\
('out1'.encode('latin-1'), 'out2'.encode('latin-1'))
mock_popen.return_value.returncode = 2
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'verify'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
self.assertFalse(success)
self.assertEqual(ide_mode, 'verify')
self.assertEqual(std_out, 'out1')
self.assertTrue(err_out, 'out2')
self.assertEqual(exit_code, 2)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
@patch('ardublocklyserver.actions.subprocess.Popen', autospec=True)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_load_arduino_cli_exit_code_50(
self, mock_settings, mock_popen, mock_isfile):
sketch_path = os.path.join(self.temp_folder, 'sketch.ino')
compiler_dir = 'whatever/arduino'
board_flag = 'whatever:flag'
mock_settings.return_value.compiler_dir = compiler_dir
mock_settings.return_value.get_arduino_board_flag = MagicMock()
mock_settings.return_value.get_arduino_board_flag.return_value =\
board_flag
mock_popen.return_value.communicate.return_value =\
('out1'.encode('latin-1'), 'out2'.encode('latin-1'))
mock_popen.return_value.returncode = 51 # Will be replaced by 50
mock_isfile.return_value = True
mock_settings.return_value.load_ide_option = 'verify'
success, ide_mode, std_out, err_out, exit_code = \
actions.load_arduino_cli(sketch_path)
self.assertFalse(success)
self.assertEqual(ide_mode, 'verify')
self.assertEqual(std_out, 'out1')
self.assertTrue('51' in err_out)
self.assertEqual(exit_code, 50)
def test_load_arduino_cli_invalid(self):
pass
#
# Tests sketch creation
#
@patch('ardublocklyserver.actions.create_sketch_from_string')
@patch('ardublocklyserver.actions.load_arduino_cli')
def test_arduino_ide_send_code_valid(
self, mock_load_arduino_cli, mock_create_sketch_from_string):
"""Test a valid input to arduino_ide_send_code function.
Because this function basically bridges two functions that are also tested
here, we only need to test that they've been called correctly.
:param mock_load_arduino_cli: Mock for load_arduino_cli()
:param mock_create_sketch_from_string: Mock for
create_sketch_from_string()
:return: None.
"""
actions.arduino_ide_send_code('dummy sketch content here')
self.assertTrue(mock_create_sketch_from_string.called)
self.assertTrue(mock_load_arduino_cli.called)
@patch('ardublocklyserver.actions.create_sketch_from_string')
@patch('ardublocklyserver.actions.load_arduino_cli')
def test_arduino_ide_send_code_invalid(
self, mock_load_arduino_cli, mock_create_sketch_from_string):
"""Test an error occurring inside arduino_ide_send_code function call.
Because this function basically bridges two functions that are also tested
here, we only need to test the error condition caused if the
create_sketch_from_string() function fails.
:param mock_load_arduino_cli: Mock for load_arduino_cli()
:param mock_create_sketch_from_string: Mock for
create_sketch_from_string()
:return: None.
"""
mock_create_sketch_from_string.return_value = None
success, ide_mode, std_out, err_out, exit_code = \
actions.arduino_ide_send_code('dummy sketch content here')
self.assertTrue(mock_create_sketch_from_string.called)
self.assertFalse(mock_load_arduino_cli.called)
self.assertFalse(success)
self.assertEqual(ide_mode, 'unknown')
self.assertIsNone(std_out)
self.assertIsNone(err_out)
self.assertEqual(exit_code, 51)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_create_sketch_from_string_file(self, mock_settings):
"""Test the create_sketch_from_string creates the file correctly.
:param mock_settings: Mock for ServerCompilerSettings class.
:return: None.
"""
sketch_name = 'test_sketch'
mock_settings.return_value.sketch_dir = self.temp_folder
mock_settings.return_value.sketch_name = sketch_name
sketch_path = os.path.join(
self.temp_folder, sketch_name, sketch_name + '.ino')
self.assertFalse(os.path.exists(sketch_path))
returned_path = actions.create_sketch_from_string('')
self.assertEqual(returned_path, sketch_path)
self.assertTrue(os.path.exists(returned_path))
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_create_sketch_from_string_content(self, mock_settings):
"""Test the create_sketch_from_string creates the file correctly.
:param mock_settings: Mock for ServerCompilerSettings class.
:return: None.
"""
sketch_code = 'いろはにほへとちり Γαζέες καὶ μυρτς Âne aéquo au whist'
mock_settings.return_value.sketch_dir = self.temp_folder
mock_settings.return_value.sketch_name = 'test_sketch'
returned_path = actions.create_sketch_from_string(sketch_code)
with codecs.open(returned_path, 'r', encoding='utf-8') as sketch:
self.assertEqual(sketch.read(), sketch_code)
#
# Tests for getting and setting compiler directory
#
@patch('ardublocklyserver.compilersettings.os.path.isfile')
def test_set_compiler_path_valid(self, mock_isfile):
"""Test set_compiler_path function changes the compiler Setting.
:param mock_isfile: Mock for os.path.isfile() inside accessor.
:return: None.
"""
old_compiler_dir = self.settings.compiler_dir
new_compiler_dir = os.path.join(self.temp_folder, 'arduino_debug.exe')
self.assertNotEqual(old_compiler_dir, new_compiler_dir)
mock_isfile.return_value = True
returned_path = actions.set_compiler_path(new_compiler_dir)
self.assertEqual(returned_path, self.settings.compiler_dir)
self.assertNotEqual(returned_path, old_compiler_dir)
self.assertNotEqual(self.settings.compiler_dir, old_compiler_dir)
# Use 'in' because each OS will deal with the compiler path differently
self.assertTrue(new_compiler_dir in returned_path)
self.assertTrue(new_compiler_dir in self.settings.compiler_dir)
@patch('ardublocklyserver.compilersettings.os.path.isfile')
def test_set_compiler_path_invalid(self, mock_isfile):
"""Test invalid file path send to set_compiler_path function.
Tests that the set_compiler_path() function does not edit the settings
based on an entered directory that is not valid.
:param mock_isfile: Mock for os.path.isfile().
:return: None.
"""
old_compiler_dir = self.settings.compiler_dir
new_compiler_dir = os.path.join(self.temp_folder, 'arduino_debug.exe')
self.assertNotEqual(old_compiler_dir, new_compiler_dir)
mock_isfile.return_value = False
returned_path = actions.set_compiler_path(new_compiler_dir)
self.assertEqual(returned_path, old_compiler_dir)
self.assertEqual(returned_path, self.settings.compiler_dir)
self.assertNotEqual(returned_path, new_compiler_dir)
self.assertEqual(self.settings.compiler_dir, old_compiler_dir)
self.assertNotEqual(self.settings.compiler_dir, new_compiler_dir)
@patch.object(actions.ServerCompilerSettings, '__new__')
def test_get_compiler_path_valid(self, mock_settings):
"""Test getting a valid compiler path in get_compiler_path.
:param mock_settings: Mock for ServerCompilerSettings constructor.
:return:
**func_dict['kwargs'])
for it, batch in self.iter_batch(enumerate(train_loader)):
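# run any callbacks registered for the start of each iteration; the schedule
# maps hook points (begin/end of iteration/epoch) to {func, args, kwargs} entries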
for func_dict in self._schedule[self._begin_iter_].values():
func_dict['func'](*func_dict['args'], **func_dict['kwargs'])
net.train(True)
batch = utils.batch_to_device(batch, device=device)
solver.zero_grad()
loss = net.train_procedure(batch, *args, **kwargs)
if not (T.isnan(loss) or T.isinf(loss)):
loss.backward()
else:
raise ValueError('NaN or Inf encountered. Training failed!')
solver.step(closure)
if scheduler is not None and scheduler_iter:
scheduler.step()
for func_dict in self._schedule[self._end_iter_].values():
func_dict['func'](*func_dict['args'], **func_dict['kwargs'])
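# periodic evaluation: every `valid_freq` iterations, switch to eval mode and
# run the network's eval procedure (skipped if `net` does not define `evaluate`)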
if valid_freq and hasattr(net, 'evaluate'):
if self.iter % valid_freq == 0:
net.eval()
with T.set_grad_enabled(False):
for itt, batch in enumerate(eval_loader):
batch = utils.batch_to_device(batch, device=device)
try:
net.eval_procedure(batch, *args, **kwargs)
except NotImplementedError:
root_logger.exception('An evaluation procedure must be specified')
raise
if scheduler is not None and not scheduler_iter:
scheduler.step()
for func_dict in self._schedule[self._end_epoch_].values():
func_dict['func'](*func_dict['args'], **func_dict['kwargs'])
def _atexit(self):
if self._initialized:
self.flush()
plt.close()
if self.writer is not None:
self.writer.flush()
self.writer.close()
self._q.join()
@check_path_init
def dump_rep(self, name, obj):
"""
saves a string representation of the given object.
:param name:
name of the txt file containing the string representation.
:param obj:
object to be saved as its string representation.
:return: ``None``.
"""
with open(os.path.join(self.current_folder, name + '.txt'), 'w') as outfile:
outfile.write(str(obj))
@check_path_init
def dump_model(self, network, use_tensorboard=False, *args, **kwargs):
"""
saves a string representation of the given neural net.
:param network:
neural net to be saved as string representation.
:param use_tensorboard:
use tensorboard to save `network`'s graph.
:param args:
additional arguments to Tensorboard's :meth:`SummaryWriter`
when `use_tensorboard` is ``True``.
:param kwargs:
additional keyword arguments to Tensorboard's :meth:`SummaryWriter`
when `use_tensorboard` is ``True``.
:return: ``None``.
"""
assert isinstance(network, (
nn.Module, nn.Sequential)), 'network must be an instance of Module or Sequential, got {}'.format(
type(network))
self.dump_rep('network', network)
if use_tensorboard:
self.writer.add_graph(network, *args, **kwargs)
@check_path_init
def backup(self, files_or_folders, ignore=None):
"""
saves a copy of the given files to :attr:`~current_folder`.
Accepts a str or list/tuple of file or folder names.
You can back up your code and/or config files for later use.
:param files_or_folders:
files or folders to be saved.
:param ignore:
files or patterns to ignore.
Default: ``None``.
:return: ``None``.
"""
assert isinstance(files_or_folders, (str, list, tuple)), \
'unknown type of \'files_or_folders\'. Expect list, tuple or string, got {}'.format(type(files_or_folders))
files_or_folders = (files_or_folders,) if isinstance(files_or_folders, str) else files_or_folders
if ignore is None:
ignore = ()
# filter ignored files
import fnmatch
to_backup = []
for f in files_or_folders:
if not any(fnmatch.fnmatch(f, p) for p in ignore):
to_backup.append(f)
for f in to_backup:
try:
if os.path.isfile(f):
copyfile(f, '%s/%s' % (self.file_folder, os.path.split(f)[-1]))
elif os.path.isdir(f):
copytree(f, '%s/%s' % (self.file_folder, os.path.split(f)[-1]))
except FileNotFoundError:
root_logger.warning('No such file or directory: %s' % f)
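# Hedged usage sketch (not from the original docs): assuming an initialized
# monitor instance `mon`, backup() could snapshot sources while skipping caches:
#
#   mon.backup(['train.py', 'configs/'], ignore=('*.pyc', '__pycache__'))
#
# The ignore patterns are matched with fnmatch against the given names; the
# file names above are illustrative assumptions, not documented defaults.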
@utils.deprecated(backup, '1.1.0')
def copy_files(self, files):
self.backup(files)
@standardize_name
def add_hparam(self, name: str, value):
if name not in self._options[self._hparams].keys():
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
self._options[self._hparams][name] = value
@standardize_name
def add_metric(self, name: str, value):
if name not in self._options[self._hparam_metrics].keys():
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
self._options[self._hparam_metrics][name] = value
@standardize_name
def plot(self, name: str, value, smooth=0, filter_outliers=True, **kwargs):
"""
schedules a plot of scalar value.
A :mod:`matplotlib` figure will be rendered and saved every :attr:`~print_freq` iterations.
:param name:
name of the figure to be saved. Must be unique among plots.
:param value:
scalar value to be plotted.
:param smooth:
a value between ``0`` and ``1`` to define the smoothing window size.
See :func:`~neuralnet_pytorch.utils.numpy_utils.smooth`.
Default: ``0``.
:param filter_outliers:
whether to filter out outliers in plot.
This affects only the plot and not the raw statistics.
Default: True.
:param kwargs:
additional options to tensorboard.
:return: ``None``.
"""
self._options[name]['smooth'] = smooth
self._options[name]['filter_outliers'] = filter_outliers
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
self._num_since_last_flush[name][self.iter] = value
if self.writer is not None:
prefix = kwargs.pop('prefix', 'scalar/')
self.writer.add_scalar(prefix + name.replace(' ', '-'), value, global_step=self.iter, **kwargs)
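# Hedged usage sketch: inside a training loop one might schedule a smoothed
# loss curve; `mon` and `loss` are assumed, illustrative names.
#
#   mon.plot('train loss', loss, smooth=0.9, filter_outliers=True)
#
# The value is buffered per iteration and, per the docstring above, rendered
# every print_freq iterations; with a SummaryWriter attached it is also logged
# as 'scalar/train-loss'.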
def plot_hparam(self):
try:
self.writer.add_hparams(dict(self._options[self._hparams]), dict(self._options[self._hparam_metrics]))
except AttributeError:
print('Tensorboard must be initialized to use this feature')
raise
@standardize_name
def plot_matrix(self, name: str, value, labels=None, show_values=False):
"""
plots the given matrix with colorbar and labels if provided.
:param name:
name of the figure to be saved. Must be unique among plots.
:param value:
matrix value to be plotted.
:param labels:
labels of each axis.
Can be a list/tuple of strings or a nested list/tuple.
Default: ``None``.
:param show_values:
whether to display each cell's value on the figure.
Default: ``False``.
:return: ``None``.
"""
self._options[name]['labels'] = labels
self._options[name]['show_values'] = show_values
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
self._mat_since_last_flush[name] = value
self._mat_since_beginning[name][self.iter] = value
@standardize_name
def scatter(self, name: str, value, latest_only=False, **kwargs):
"""
schedules a scatter plot of (a batch of) points.
A 3D :mod:`matplotlib` figure will be rendered and saved every :attr:`~print_freq` iterations.
:param name:
name of the figure to be saved. Must be unique among plots.
:param value:
2D or 3D tensor to be plotted. The last dim should be 3.
:param latest_only:
whether to save only the latest statistics or keep everything from beginning.
:param kwargs:
additional options to tensorboard.
:return: ``None``.
"""
self._options[name]['latest_only'] = latest_only
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
if len(value.shape) == 2:
value = value[None]
self._points_since_last_flush[name][self.iter] = value
if self.writer is not None:
self.writer.add_mesh(name, value, global_step=self.iter, **kwargs)
@standardize_name
def imwrite(self, name: str, value, latest_only=False, **kwargs):
"""
schedules to save images.
The images will be rendered and saved every :attr:`~print_freq` iterations.
There are some assumptions about input data:
- If the input is ``'uint8'`` it is an 8-bit image.
- If the input is ``'float32'``, its values lie between ``0`` and ``1``.
- If the input has 3 dims, the shape is ``[h, w, 3]`` or ``[h, w, 1]``.
- If the channel dim is different from 3 or 1, it will be considered as multiple gray images.
:param name:
name of the figure to be saved. Must be unique among plots.
:param value:
2D, 3D or 4D tensor to be plotted.
The expected shape is ``(H, W)`` for 2D tensor, ``(H, W, C)`` for 3D tensor and
``(N, C, H, W)`` for 4D tensor.
If the number of channels is other than 3 or 1, each channel is saved as
a gray image.
:param latest_only:
whether to save only the latest statistics or keep everything from beginning.
:param kwargs:
additional options to tensorboard.
:return: ``None``.
"""
self._options[name]['latest_only'] = latest_only
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
if value.dtype != 'uint8':
value = (255.99 * value).astype('uint8')
if len(value.shape) == 3:
value = np.transpose(value, (2, 0, 1))[None]
elif len(value.shape) == 2:
value = value[None, None]
self._img_since_last_flush[name][self.iter] = value
if self.writer is not None:
prefix = kwargs.pop('prefix', 'image/')
self.writer.add_images(prefix + name.replace(' ', '-'), value,
global_step=self.iter, dataformats='NCHW')
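# Hedged usage sketch illustrating the shape/dtype assumptions above; the
# array below is a made-up example, not project data.
#
#   img = np.random.rand(64, 64, 3).astype('float32')  # values in [0, 1]
#   mon.imwrite('sample image', img, latest_only=True)
#
# A float32 HWC image is scaled to uint8 and transposed into a (1, 3, 64, 64)
# NCHW batch before being stored and, if a writer exists, sent to Tensorboard.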
@standardize_name
def hist(self, name, value, n_bins=20, latest_only=False, **kwargs):
"""
schedules a histogram plot of (a batch of) points.
A :mod:`matplotlib` figure will be rendered and saved every :attr:`~print_freq` iterations.
:param name:
name of the figure to be saved. Must be unique among plots.
:param value:
any-dim tensor to be histogrammed.
:param n_bins:
number of bins of the histogram.
:param latest_only:
whether to save only the latest statistics or keep everything from beginning.
:param kwargs:
additional options to tensorboard
:return: ``None``.
"""
self._options[name]['latest_only'] = latest_only
self._options[name]['n_bins'] = n_bins
if isinstance(value, T.Tensor):
value = utils.to_numpy(value)
self._hist_since_last_flush[name][self.iter] = value
if self.writer is not None:
prefix = kwargs.pop('prefix', 'hist/')
self.writer.add_histogram(prefix + name.replace(' ', '-'), value, global_step=self.iter, **kwargs)
def schedule(self, func, when=None, *args, **kwargs):
"""
used to schedule a routine to be executed during every epoch in :meth:`~run_training`.
:param func:
a routine to be executed in :meth:`~run_training`.
:param when:
the moment when the ``func`` is executed.
For the moment, choices are:
``'begin_epoch'``, ``'end_epoch'``, ``'begin_iter'``, and ``'end_iter'``.
Default: ``'begin_epoch'``.
:param args:
additional arguments to `func`.
:param kwargs:
additional keyword arguments to `func`.
:return: ``None``
"""
assert callable(func), 'func must be callable'
name = func.__name__
if when is None:
when = self._begin_epoch_
self._schedule[when][name]['func'] = func
self._schedule[when][name]['args'] = args
self._schedule[when][name]['kwargs'] = kwargs
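# Hedged usage sketch: registering a routine to run at the end of every epoch;
# `mon`, `net` and `save_checkpoint` are assumed names, and the 'end_epoch' key
# is assumed to match the monitor's internal _end_epoch_ constant.
#
#   def save_checkpoint(path):
#       T.save(net.state_dict(), path)
#
#   mon.schedule(save_checkpoint, when='end_epoch', path='ckpt.pt')
#
# The callable is stored under its __name__ and invoked with the given
# args/kwargs at the chosen moment inside run_training().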
def _plot(self, nums, prints):
fig = plt.figure()
plt.xlabel('iteration')
for name, val in list(nums.items()):
smooth = self._options[name].get('smooth')
filter_outliers = self._options[name].get('filter_outliers')
self._num_since_beginning[name].update(val)
plt.ylabel(name)
x_vals = sorted(self._num_since_beginning[name].keys())
y_vals = [self._num_since_beginning[name][x] for x in x_vals]
max_, min_, med_, mean_ = np.max(y_vals), np.min(y_vals), np.median(y_vals), np.mean(y_vals)
argmax_, argmin_ = np.argmax(y_vals), np.argmin(y_vals)
plt.title('max: {:.8f} at iter {} min: {:.8f} at iter {} \nmedian: {:.8f} mean: {:.8f}'
.format(max_, x_vals[argmax_], min_, x_vals[argmin_], med_, mean_))
x_vals, y_vals = np.array(x_vals), np.array(y_vals)
y_vals_smoothed = utils.smooth(y_vals, smooth)[:x_vals.shape[0]] if smooth else y_vals
plt.plot(x_vals, y_vals_smoothed)
# Repository: deaconjs/SPADE
"""Wrapper for BWidget family of widgets"""
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
#
# GOVERNMENT USE: If you are acquiring this software on behalf of the
# U.S. government, the Government shall have only "Restricted Rights"
# in the software and related documentation as defined in the Federal
# Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
# are acquiring the software on behalf of the Department of Defense, the
# software shall be classified as "Commercial Computer Software" and the
# Government shall have only "Restricted Rights" as defined in Clause
# 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
# authors grant the U.S. Government and others acting in its behalf
# permission to use and distribute the software in accordance with the
# terms specified in this license.
__author__ = "<NAME> <jepler AT unpy DOT net>"
__all__ = """
Entry Label Button ArrowButton ProgressBar ScrollView Separator
MainFrame LabelFrame TitleFrame PanelFrame ScrolledWindow ScrollableFrame
PanedWindow ButtonBox PagesManager NoteBook Dialog StatusBar
LabelEntry ComboBox SpinBox Tree ListBox MessageDialog ProgressDialog
PasswordDialog SelectFont SelectColor SelectColorMenu
CASCADE CHECKBUTTON COMMAND RADIOBUTTON SEPARATOR STATUS PROGRESSION
LINK
""".split()
import Tkinter, types, os, sys, new
ROOT='root'
def _wrap(wrapper, oldfunc):
return new.function(wrapper.func_code, wrapper.func_globals,
oldfunc.func_name, wrapper.func_defaults, wrapper.func_closure)
def returnswidget(f):
def w(self, *args, **kw):
r = f(self, *args, **kw)
return self.nametowidget(str(r))
return _wrap(w, f)
def makeswidget(f, t):
def w(self, *args, **kw):
r = str(f(self, *args, **kw))
try:
return self.nametowidget(r)
except KeyError:
return makewidget(self, t, str(r))
return _wrap(w, f)
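# Hedged sketch of the wrapping pattern above: returnswidget/makeswidget turn a
# method that returns a Tcl path string into one that returns the corresponding
# Tkinter wrapper object. The class and sub-command below are purely
# illustrative, not part of BWidget.
#
#   class Example(Tkinter.Frame):
#       def child(self):
#           return self.tk.call(self._w, "getchild")
#       child = makeswidget(child, Tkinter.Frame)
#
# Calling child() on an Example instance would then yield a Tkinter.Frame
# wrapper for whatever path the "getchild" sub-command reports, creating the
# wrapper on demand if Tkinter does not know it yet.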
def nametowidget(self, name):
"""Return the Tkinter instance of a widget identified by
its Tcl name NAME."""
w = self
if name[0] == '.':
w = w._root()
name = name[1:]
while name:
i = name.find('.')
if i >= 0:
name, tail = name[:i], name[i+1:]
else:
tail = ''
while tail:
try:
w.children[name]
except KeyError:
j = tail.find('.')
if j >= 0:
name, tail = name + "." + tail[:j], tail[j+1:]
else:
name, tail = name + "." + tail, ''
else:
break
w = w.children[name]
name = tail
return w
Tkinter.Misc.nametowidget = nametowidget
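# Hedged usage sketch: after the monkey-patch above, any widget can resolve a
# Tcl path name back to its Python wrapper; the path below is an invented
# example, not a real widget in this module.
#
#   btn = some_widget.nametowidget('.mainframe.button1')
#
# The lookup walks the children dictionaries from the root, joining name parts
# with '.' until an existing child is found at each level.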
def makewidget(master, klass, path):
path = str(path)
self = types.InstanceType(klass)
self._name = path[len(master._w)+1:]
self._w = path
self.children = {}
master.children[self._name] = self
self.master = master
self.tk = master.tk
return self
_datadir = os.path.join("C:\\Users\\Dude\\Desktop\\SPADE\\Dependencies\\pybwidget-0.1.2_1.7.0")
class BWidget:
def _require(self, master):
auto_path = master.tk.call("set", "auto_path")
if not _datadir in auto_path:
master.tk.call("lappend", "auto_path", _datadir)
master.tk.call("package", "require", "BWidget")
def __init__(self, master, cnf={}, **kw):
self._require(master)
Tkinter.Widget.__init__(self, master, self.__class__.__name__, cnf, kw)
# Simple Widgets
class Entry(BWidget, Tkinter.Entry):
def invoke(self):
return self.tk.call(self._w, "invoke")
class Label(BWidget, Tkinter.Label):
def setfocus(self):
return self.tk.call(self._w, "setfocus")
class Button(BWidget, Tkinter.Button): pass
class ArrowButton(BWidget, Tkinter.Button): pass
class ProgressBar(BWidget, Tkinter.Widget): pass
class ScrollView(BWidget, Tkinter.Widget): pass
class Separator(BWidget, Tkinter.Widget): pass
# Manager Widgets
class _Frame:
def getframe(self):
return self.tk.call(self._w, "getframe")
getframe = makeswidget(getframe, Tkinter.Frame)
class _Items:
def itemcget(self, index, option):
return self.tk.call(self._w, "itemcget", index, '-' + option)
def itemconfigure(self, index, cnf=None, **kw):
return self._configure(('itemconfigure', index), cnf, kw)
LINK="link"
CASCADE="cascade"
CHECKBUTTON="checkbutton"
COMMAND="command"
RADIOBUTTON="radiobutton"
SEPARATOR="separator"
STATUS = "status"
PROGRESSION = "progression"
class MainFrame(BWidget, _Frame, Tkinter.Widget):
def addindicator(self, **kw):
return self.tk.call(self._w, "addindicator", *self._options(kw))
addindicator = makeswidget(addindicator, Label)
def getindicator(self, i):
return self.tk.call(self._w, "getindicator", i)
getindicator = returnswidget(getindicator)
def getmenu(self):
return self.tk.call(self._w, "getmenu")
getmenu = returnswidget(getmenu)
def setmenustate(self, tag, state):
return self.tk.call(self._w, "setmenustate", tag, state)
def showstatusbar(self, name):
return self.tk.call(self._w, "showstatusbar", name)
def showtoolbar(self, index, bool_):
return self.tk.call(self._w, "showtoolbar", index, bool_)
class LabelFrame(BWidget, _Frame, Tkinter.Widget):
def align(self, others):
return self.tk.call("LabelFrame::align", self, *others)
class TitleFrame(BWidget, _Frame, Tkinter.Frame): pass
class PanelFrame(BWidget, Tkinter.Frame): pass
class ScrolledWindow(BWidget, _Frame, Tkinter.Frame):
def setwidget(self, child):
return self.tk.call(self._w, "setwidget", child)
class ScrollableFrame(BWidget, _Frame, Tkinter.Frame):
def see(self, w, vert=None, horiz=None):
if vert is None and horiz is None:
return self.tk.call(self._w, "see", w)
return self.tk.call(self._w, "see", w, vert, horiz)
def xview(self, *args):
return self.tk.call(self._w, "xview", *args)
def yview(self, *args):
return self.tk.call(self._w, "yview", *args)
class PanedWindow(BWidget, Tkinter.Frame):
def add(self, **kw):
return self.tk.call(self._w, "add", *self._options(kw))
add = makeswidget(add, Tkinter.Frame)
def getframe(self, index):
return self.tk.call(self._w, "getframe", index)
getframe = makeswidget(getframe, Tkinter.Frame)
class ButtonBox(BWidget, _Items, Tkinter.Frame):
def add(self, **kw):
return self.tk.call(self._w, "add", *self._options(kw))
add = makeswidget(add, Button)
def delete(self, index):
self.tk.call(self._w, "delete", index)
def index(self, item):
self.tk.call(self._w, "index", item)
def insert(self, index, *kw):
return self.tk.call(self._w, "insert", index, *self._options(kw))
insert = makeswidget(insert, Button)
def invoke(self, index):
return self.tk.call(self._w, "invoke", index)
def setfocus(self, index):
return self.tk.call(self._w, "setfocus", index)
class PagesManager(BWidget, Tkinter.Frame):
def add(self, page):
return self.tk.call(self._w, "add", page)
add = makeswidget(add, Tkinter.Frame)
def compute_size(self):
return self.tk.call(self._w, "compute_size")
def delete(self, page):
return self.tk.call(self._w, "delete", page)
def getframe(self, page):
return self.tk.call(self._w, "delete", page)
getframe = makeswidget(getframe, Tkinter.Frame)
def pages(self, *args):
return self.tk.call(self._w, "pages", *args)
def raise_page(self, page=None):
if page is None:
return self.tk.call(self._w, "raise")
return self.tk.call(self._w, "raise", page)
class NoteBook(BWidget, Tkinter.Frame, _Items):
def bindtabs(self, event, func):
if callable(func):
command = self.register( func )
else:
command = func
return self.tk.call(self._w, "bindtabs", event, command)
def delete(self, page, destroyframe=True):
return self.tk.call(self._w, "delete", page, destroyframe)
def insert(self, index, page, **kw):
return self.tk.call(self._w, "insert", index, page, *self._options(kw))
insert = makeswidget(insert, Tkinter.Frame)
def move(self, page, index):
return self.tk.call(self._w, "move", page, index)
def see(self, page):
return self.tk.call(self._w, "see", page)
# XXX these methods are from PagesManager but inheritance
# won't work, because NoteBook has no 'add' command
def compute_size(self):
return self.tk.call(self._w, "compute_size")
def delete(self, page):
return self.tk.call(self._w, "delete", page)
def getframe(self, page):
return self.tk.call(self._w, "delete", page)
getframe = makeswidget(getframe, Tkinter.Frame)
def pages(self, *args):
return self.tk.call(self._w, "pages", *args)
def raise_page(self, page=None):
if page is None:
return self.tk.call(self._w, "raise")
return self.tk.call(self._w, "raise", page)
class Dialog(ButtonBox, Tkinter.BaseWidget, _Frame):
def draw(self, focus=None):
if focus is None:
return self.tk.call(self, "draw")
return self.tk.call(self._w, "draw", focus)
def enddialog(self):
return self.tk.call(self._w, "enddialog")
def withdraw(self):
return self.tk.call(self._w, "withdraw")
class StatusBar(BWidget): pass
class LabelEntry(Entry): pass
class ComboBox(Entry):
def bind_entry(self, *args):
return self.tk.call(self._w, "bind", *args)
def getlistbox(self):
r = str(self.tk.call(self._w, "getlistbox"))
try:
return self.nametowidget(r)
except KeyError:
c = self.tk.call("winfo", "class", r)
if c == "ListBox":
return makewidget(self, ListBox, r)
else:
return makewidget(self, Tkinter.Listbox, r)
def getvalue(self):
return self.tk.call(self._w, "getvalue")
def post(self):
return self.tk.call(self._w, "post")
def setvalue(self, index):
return self.tk.call(self._w, "setvalue", index)
def unpost(self):
return self.tk.call(self._w, "unpost")
class SpinBox(Entry):
def bind_entry(self, *args):
return self.tk.call(self._w, "bind", *args)
def setvalue(self, index):
return self.tk.call(self._w, "setvalue", index)
def getvalue(self):
return self.tk.call(self._w, "getvalue")
class Tree(BWidget, Tkinter.Widget, _Items):
def bind_image(self, event, func):
if callable(func):
command = self.register( func )
else:
command = func
return self.tk.call(self._w, "bindImage", event, command)
bindImage = bind_image
def bind_text(self, event, func):
if callable(func):
command = self.register( func )
else:
command = func
return self.tk.call(self._w, "bindText", event, command)
bindText = bind_text
def closetree(self, node):
return self.tk.call(self._w, "closetree", node)
def delete(self, arg, *args):
return self.tk.call(self._w, "delete", arg, *args)
def edit(self, node, text, *args):
"edit(self, node, text, verifycmd=None, clickres=None, select=None)"
return self.tk.call(self._w, "edit", node, text, *args)
def exists(self, node):
return self.tk.call(self._w, "exists", node)
def index(self, node):
return self.tk.call(self._w, "index", node)
def insert(self, index, parent, node="#auto", **kw):
return self.tk.call(self._w, "insert", index, parent, node,
*self._options(kw))
def move(self, parent, node, index):
return self.tk.call(self._w, "move", parent, node, index)
def nodes(self, node, *args):
return self.tk.call(self._w, "nodes", node, *args)
def opentree(self, node, recurse=True):
return self.tk.call(self._w, "opentree", node, recurse)
def parent(self, node):
return self.tk.call(self._w, "parent", node)
def reorder(self, node, neworder):
return self.tk.call(self._w, "reorder", node, neworder)
nearest_stream.get('closest_stream_point')
stream_name = nearest_stream.get('gnis_name', '')
stream_feature_id = nearest_stream.get('linear_feature_id', None)
point_on_stream = shape(nearest_stream_point)
return (stream_feature_id, stream_name, point_on_stream)
def get_watershed_id_at_point(db: Session, point: Point):
"""
given a point (degrees lng lat / EPSG:4326), return the ID
of the watershed containing it.
"""
q = db.query(FreshwaterAtlasWatersheds.WATERSHED_FEATURE_ID).filter(
func.ST_Contains(
FreshwaterAtlasWatersheds.GEOMETRY,
func.ST_GeomFromText(
point.wkt, 4326)
)
)
watershed_id = q.first()
if not watershed_id:
if WATERSHED_DEBUG:
logger.warning("No watershed found")
return None
watershed_id = watershed_id[0]
if WATERSHED_DEBUG:
logger.info("watershed id %s", watershed_id)
return watershed_id
def get_upstream_watershed_polygon_count(db: Session, watershed_id: int) -> int:
"""Returns the number of polygons upstream from watershed_id.
This helps if we want to determine if a group of watershed polygons
originating from `watershed_id` will take too long to dissolve or will
take too long to do GIS operations on the resulting area.
The average polygon size is (very roughly) 0.2 to 0.3 square km, so if
we want to approximately predict whether an upstream area might be more than
1000 square km, we can check to ensure there are less than about 4000 polygons.
This query can be expensive but is much faster than attempting a full watershed
delineation and finding out too late that we're working in an unmanageably large area.
"""
q = """
with subwscode_ltree as (
SELECT "WATERSHED_FEATURE_ID" as origin_id,
wscode_ltree as origin_wscode,
localcode_ltree as origin_localcode,
ltree2text(subpath(localcode_ltree, -1))::integer as downstream_tributary,
nlevel(localcode_ltree) as downstream_tributary_code_pos
FROM freshwater_atlas_watersheds
WHERE "WATERSHED_FEATURE_ID" = :watershed_feature_id
)
SELECT count(*)
FROM freshwater_atlas_watersheds
WHERE wscode_ltree <@ (select origin_wscode from subwscode_ltree)
AND ltree2text(subltree(
localcode_ltree || '000000'::ltree,
(select downstream_tributary_code_pos from subwscode_ltree) - 1,
(select downstream_tributary_code_pos from subwscode_ltree) - 0
))::integer >= (select downstream_tributary from subwscode_ltree)
AND (NOT wscode_ltree <@ (select origin_localcode from subwscode_ltree) OR (select origin_wscode from subwscode_ltree) = (select origin_localcode from subwscode_ltree))
"""
res = db.execute(q, {"watershed_feature_id": watershed_id})
record = res.fetchone()
if not record or not record[0]:
logger.warning(
'unable to calculate polygon count from watershed feature id %s', watershed_id)
return None
return record[0]
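# Hedged sanity-check example for the heuristic described in the docstring: at
# an average polygon size of roughly 0.25 square km, a count of about 4000
# polygons corresponds to an upstream area on the order of 4000 * 0.25 = 1000
# square km. The exact threshold below is an assumption, not enforced here.
#
#   count = get_upstream_watershed_polygon_count(db, watershed_id)
#   if count and count > 4000:
#       logger.warning("upstream area likely exceeds ~1000 km2; consider skipping")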
def calculate_watershed(
db: Session,
user,
click_point: Point = None,
watershed_id: int = None,
hydat_station_number: str = None,
upstream_method='DEM+FWA',
dem_source='cdem'
) -> GeneratedWatershedDetails:
""" estimates the watershed area upstream of a POI and returns a GeneratedWatershedDetails object.
It uses one of several methods to estimate the upstream area, described below.
Optional arguments:
upstream_method: 'FWA+FULLSTREAM', 'FWA+UPSTREAM', 'DEM', or 'DEM+FWA'.
FWA+FULLSTREAM: Use the Freshwater Atlas to return the catchment area of the entire
selected stream.
FWA+UPSTREAM: Estimate the upstream catchment area using only the Freshwater Atlas.
This will be accurate to the FWA linework around the outer perimeter
but will over or under-estimate the area around the point of interest (because
the watershed polygons forming the catchment area will not be split, even if
the point of interest is in the middle of a polygon)
DEM: Use the DEM (Digital Elevation Model) to delineate the catchment.
WhiteboxTools is used. See `get_watershed_using_dem` for more info.
This is more accurate around the point of interest but the outer reaches
are less smooth than the FWA linework and may slightly under or overestimate
around the outer perimeter of the watershed.
DEM+FWA: Attempt to combine both the DEM and FWA methods by using the DEM close
to the point of interest, and the FWA around the outer perimeter. This
is the default.
"""
start = time.perf_counter()
warnings = []
if click_point and watershed_id:
raise ValueError(
"Do not provide both point and watershed_id at the same time")
if not click_point and not watershed_id and not hydat_station_number:
raise ValueError(
"Must provide either a starting point (lat, long), a starting watershed ID, or a HYDAT station number")
if watershed_id and not upstream_method.startswith("FWA"):
raise ValueError(
f"Starting watershed ID incompatible with {upstream_method}. Use only FWA methods.")
if WATERSHED_DEBUG:
logger.info("calculating watershed")
stream_name = ''
point_on_stream = None
stream_feature_id = None
# if this watershed is based on a HYDAT station, look up the lat/long.
# Since the lat/long can sometimes be incorrect or in an ambiguous location
# (e.g. at a confluence where the station point may be closer to a tributary than
# the centreline of the stream being monitored), correct the point onto the stream
# named in the station name.
if hydat_station_number:
point_on_stream, stream_feature_id, click_point = get_point_on_stream(db, hydat_station_number)
elif click_point:
# move the click point to the nearest stream based on the FWA Stream Networks.
# this will make it easier to snap the point to the Flow Accumulation raster
# we will generate.
nearest_stream = get_nearest_streams(db, click_point, limit=1)[0]
stream_feature_id, stream_name, point_on_stream = get_nearest_stream_name_id(nearest_stream)
stream_distance = transform(
transform_4326_3005, point_on_stream
).distance(
transform(transform_4326_3005, click_point))
# create a warning if the distance between the stream and click point is
# greater than stream_distance_warning_threshold
stream_distance_warning_threshold = 500
if stream_distance > stream_distance_warning_threshold:
point_not_on_stream_warning = WatershedDataWarning(
message=f"This point is more than {stream_distance_warning_threshold} m from the nearest stream" +
" (based on Freshwater Atlas stream mapping)." +
" WALLY's Surface Water Analysis is applicable to points of interest along streams." +
" If you believe this is an error, please contact the WALLY team."
)
warnings.append(point_not_on_stream_warning)
if point_on_stream:
# get the ID of the watershed we are starting in.
# this will be used later to help with queries against the fundamental watersheds.
watershed_id = get_watershed_id_at_point(db, point_on_stream)
watershed_point = None
watershed = None
# declare some variables that will be used for watershed metadata.
dem_error = None
is_near_border = bool(len(watershed_touches_border(db, watershed_id)))
# the location of the point after correcting/snapping to a stream.
# This means either snapping to a vector FWA stream or using a SnapPourPoint
# routine (SnapPourPoints or JensonSnapPourPoints). If both, this value
# will be from the SnapPourPoint routine since that will be done after
# moving the point to a stream.
snapped_point = point_on_stream or click_point
# choose method based on function argument.
if upstream_method.startswith('DEM'):
# estimate the watershed using the DEM
(watershed, snapped_point, dem_error) = get_watershed_using_dem(
db, point_on_stream, stream_feature_id, watershed_id,
dem_source=dem_source, use_fwa=upstream_method == 'DEM+FWA')
watershed_source = "Estimated using CDEM and WhiteboxTools."
generated_method = 'generated_dem'
watershed_point = base64.urlsafe_b64encode(
point_on_stream.wkb).decode('utf-8')
if dem_error:
dem_error_warning = WatershedDataWarning(
message="Your watershed could not be refined to the dropped point. There may be significant" +
" extra area downstream of your point of interest, or other errors. Please carefully verify the watershed" +
" boundary."
)
warnings.append(dem_error_warning)
# ensure that we stop here if the watershed touches a border.
if upstream_method == 'DEM+FWA' and is_near_border:
no_cross_border_fwa_warning = WatershedDataWarning(
message=f"This watershed was delineated from a DEM without using the Freshwater Atlas" +
" because it is close to or crosses a boundary with another jurisdiction where FWA data" +
" is not available. Please verify estimated watershed boundary."
)
warnings.append(no_cross_border_fwa_warning)
# if using DEM+FWA and not near a border, add the source info for CDEM/WBT/FWA.
elif upstream_method == 'DEM+FWA':
watershed_source = "Estimated by combining the result from CDEM/WhiteboxTools " + \
"with Freshwater Atlas fundamental watershed polygons."
generated_method = 'generated_dem_fwa'
elif upstream_method == 'FWA+UPSTREAM':
watershed = get_upstream_catchment_area(db, watershed_id)
watershed_source = "Estimated by combining Freshwater Atlas watershed polygons that are " + \
"determined to be part of the selected stream based on FWA_WATERSHED_CODE and upstream " + \
"of the selected point based on LOCAL_WATERSHED_CODE. Note: the watershed polygon " + \
"containing the selected point is included."
generated_method = 'generated'
watershed_point = watershed_id
elif upstream_method == 'FWA+FULLSTREAM':
watershed = get_full_stream_catchment_area(db, watershed_id)
watershed_source = "Estimated by combining Freshwater Atlas watershed polygons that are " + \
"determined to be part of the selected stream based on FWA_WATERSHED_CODE."
generated_method = 'generated_full_stream'
watershed_point = watershed_id
else:
raise ValueError(
f"Invalid method {upstream_method}. Valid methods are DEM, DEM+FWA, FWA+UPSTREAM, FWA+FULLSTREAM")
feature = Feature(
geometry=watershed,
id=f"{generated_method}.{watershed_point}",
properties={
"name": f"Estimated catchment area {f'({stream_name})' if stream_name else ''}",
"watershed_source": watershed_source,
"stream_name": stream_name
}
)
if not feature:
# was not able to calculate a watershed with the provided params.
# return None; the calling function will skip this calculated watershed
# and return other pre-generated ones.
logger.info(
"skipping calculated watershed based on watershed feature | |
Label(self.matrixOfGraphLabel, text="Matrix Entry", bg=self.color_main_content,
fg=self.color_red,
bd=0, anchor=W)
self.matrixLabel.place(relx=0, rely=0.05, relheight=0.1, relwidth=0.8)
self.matrixLabel['font'] = font1
#self.text_matrix_1 = "Please Enter matrix separated by ','(example: 0010,1101,0101,1111 => 4*4 matrix)"
self.matrixEntry = MyEntry(self.matrixOfGraphLabel, 0, 0.18, 1, 0.22, self.color_main_content, "black",
font.Font(family="Times", size=9, slant="italic", weight='bold'), self.text_matrix_1)
self.validateMatrixMessage = Label(self.matrixOfGraphLabel, bg=self.color_main_content, text="",
fg=self.color_red)
self.validateMatrixMessage['font'] = font3
self.validateMatrixMessage.place(relx=0, rely=0.52, relheight=0.22, relwidth=0.7)
self.validateMatrixEntry = MyButton(self.matrixOfGraphLabel, 0.75, 0.5, 0.25, 0.25, self.color_main_content,
self.ButtonValidate,
lambda event: self.validateCustomizableMatrix(event, self.matrixEntry,
self.validateMatrixMessage))
# Button of after and back
self.optionBackButton = MyButton(self.frameRight2, 0.02, 0.9, 0.1, 0.1, self.color_main_content,
self.ButtonBack, self.OptionBack)
self.optionNextButton = MyButton(self.frameRight2, 0.85, 0.9, 0.15, 0.1, self.color_main_content,
self.ButtonNext, self.OptionNext)
self.optionNextButton.place_forget()
## Starting working on the main content => frameRight3
# Creating graph container
self.drawGraphLabel = LabelFrame(self.frameRight3, bd=0, bg=self.color_main_content, text="")
self.drawGraphLabel.place(relx=0, rely=0, relwidth=1, relheight=1)
# frame graph content
self.drawgraphtextlabel = Label(self.drawGraphLabel, bg=self.color_main_content, text="Graph Representation",
fg=self.color_red)
self.drawgraphtextlabel.place(relx=0.05, rely=0, relheight=0.05, relwidth=0.9)
self.drawgraphtextlabel['font'] = font1
self.drawGraphContainer = LabelFrame(self.drawGraphLabel, bg="white", bd=0)
self.drawGraphContainer.place(relx=0.05, rely=0.06, relwidth=0.9, relheight=0.75)
self.GraphAlgorithmOptionLabel = LabelFrame(self.drawGraphLabel, bg=self.color_main_content, text="", bd=0)
self.GraphAlgorithmOptionLabel.place(relx=0.05, rely=0.81, relheight=0.11, relwidth=0.9)
self.textEntry1 = "Enter the Starting Vertex"
self.GraphAlgorithmEntry1 = MyEntry(self.GraphAlgorithmOptionLabel, 0.05, 0.25, 0.3, 0.4,
self.color_main_content,
self.color_red, font2, self.textEntry1)
self.textEntry2 = "Enter the Ending Vertex"
self.GraphAlgorithmEntry2 = MyEntry(self.GraphAlgorithmOptionLabel, 0.36, 0.25, 0.3, 0.4,
self.color_main_content,
self.color_red, font2, self.textEntry2)
self.GraphAlgorithmButton = MyButton(self.GraphAlgorithmOptionLabel, 0.73, 0.17, 0.27, 0.6,
self.color_main_content,
self.ButtonRunAlgorithm,
self.validateAlgoPar)
# after and back buttons
self.processingBackButton = MyButton(self.frameRight3, 0.02, 0.923, 0.1, 0.077, self.color_main_content,
self.ButtonBack, self.ProcessingBack)
self.processingExitButton = MyButton(self.frameRight3, 0.84, 0.92, 0.15, 0.08, self.color_main_content,
self.ButtonExit, self.Exit)
self.GraphResetButton = MyButton(self.frameRight3, 0.45, 0.908, 0.12, 1-0.908, self.color_main_content,
self.ButtonReset,
lambda event, frame=self.frameRight3, btn=self.processingButton, type="Graph":
self.reset(event, frame, btn, type))
## Starting working on the main content => frameRight3
# Creating PathFinding container
self.drawPathLabel = LabelFrame(self.frameRight4, bd=0, bg=self.color_main_content, text="")
self.drawPathLabel.place(relx=0, rely=0, relwidth=1, relheight=1)
# Pathfinding content
self.pathTextLabel = Label(self.drawPathLabel, bg=self.color_main_content, text="The Pathfinding Problem",
fg=self.color_red)
self.pathTextLabel.place(relx=0.025, rely=0, relheight=0.08, relwidth=0.73)
self.pathTextLabel['font'] = font2
self.pathCanvas = Canvas(self.drawPathLabel, bg=self.color_main_content, width=525, height=450, bd=0,
highlightthickness=1)
self.pathCanvas.place(relx=0.025, rely=0.085)
self.runButton = MyButton(self.drawPathLabel, 0.825, 0.2, 0.1, 0.1, self.color_main_content, self.RunImage,
lambda event: self.runPath(event))
self.clearButton = MyButton(self.drawPathLabel, 0.82, 0.4, 0.12, 0.1, self.color_main_content, self.ClearImage,
lambda event: self.clearPath(event))
self.clearButton.disable()
self.pathBackButton = MyButton(self.frameRight4, 0.02, 0.923, 0.1, 0.077, self.color_main_content,
self.ButtonBack,
lambda event: self.pathBack(event))
self.pathExitButton = MyButton(self.frameRight4, 0.85, 0.923, 0.12, 1 - 0.923, self.color_main_content,
self.ButtonExit, self.Exit)
self.pathResetButton = MyButton(self.frameRight4, 0.45, 0.908, 0.12, 1-0.908, self.color_main_content,
self.ButtonReset,
lambda event, frame=self.frameRight4, btn=self.processingButton, type="Path":
self.reset(event, frame, btn, type))
def NextPage(self, event, first_frame, second_frame, btn1, btn2):
first_frame.place_forget()
second_frame.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
btn1.ChangeBackgroundColor(self.color_menu)
btn2.ChangeBackgroundColor(self.color_main_content)
def BackPage(self, event, first_frame, second_frame, btn1, btn2):
first_frame.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
second_frame.place_forget()
btn2.ChangeBackgroundColor(self.color_menu)
btn1.ChangeBackgroundColor(self.color_main_content)
# Methods of the Input Page
def inputChoice(self, num):
self.inputNextButton.place_forget()
if num == 1:
self.graphLabel.place_forget()
self.pathLabel.place(relx=0.1, rely=0.31, relheight=0.6, relwidth=0.8)
self.optionInput = 1
else:
self.pathLabel.place_forget()
self.graphLabel.place(relx=0.1, rely=0.31, relheight=0.6, relwidth=0.8)
def validatePath(self, event):
try:
self.column = int(self.numberOfColumnsEntry.get())
self.row = int(self.numberOfRowsEntry.get())
text1 = f"Validation complete: \nColumn = {self.column} \nRow = {self.row}"
self.optionButton = True
self.validatepathText.configure(text=text1)
self.inputNextButton.show()
except ValueError:
self.optionButton = False
self.validatepathText.configure(text="")
messagebox.showerror("Error", "Please Enter A Valid Integer Number")
def graphChoice(self, num):
self.inputNextButton.place_forget()
self.isMatrixOk = False
if num == 1:
self.matrixOfGraphLabel1.place(relx=0.1, rely=0.42, relheight=0.55, relwidth=0.8)
self.matrixLabel.place(relx=0, rely=0.05, relheight=0.1, relwidth=0.8)
self.matrixEntry.show()
self.validateMatrixMessage.place(relx=0, rely=0.52, relheight=0.22, relwidth=0.7)
self.validateMatrixEntry.show()
self.graphLabelImage1.place(relx=0, rely=0.3, relwidth=0.3)
self.graphLabel2.place(relx=0.31, rely=0.3, relwidth=0.69, relheight=1)
self.graphLabelImage2.place_forget()
self.graphLabel3.place_forget()
self.optionInput = 2
else:
self.graphLabelImage2.place(relx=0, rely=0.3, relwidth=0.3)
self.graphLabel3.place(relx=0.31, rely=0.3, relwidth=0.69, relheight=1)
self.graphLabelImage1.place_forget()
self.graphLabel2.place_forget()
self.optionInput = 3
self.matrixOfGraphLabel1.place_forget()
self.matrixLabel.place_forget()
self.matrixEntry.place_forget()
self.validateMatrixMessage.place_forget()
self.validateMatrixEntry.place_forget()
def validateVertices(self, event):
try:
self.numVertices = int(self.numberOfVerticesEntry.get())
text = f"Value : {self.numVertices}"
self.validateVerticesButtonMessage.configure(text=text)
self.inputNextButton.show()
except ValueError:
messagebox.showerror("Error", "Please Enter A Valid Integer Number")
self.validateVerticesButtonMessage.configure(text="")
def OpenFile(self, event):
self.filepath = askopenfilename(initialdir="/", title="Select A File",
filetypes=(("text files", "*.txt"), ('All files', '*.*')))
if self.filepath != "":
self.titleOfFile.configure(text=self.filepath)
self.isFileSelected = True
self.isFileRead = False
else:
self.isFileSelected = False
self.isFileRead = False
def squaredMatrix(self, matrix):
return all(len(row) == len(matrix) for row in matrix)
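# Hedged examples of the check above (values are illustrative only):
#   self.squaredMatrix([[0, 1], [1, 0]])        -> True  (2x2)
#   self.squaredMatrix([[0, 1, 1], [1, 0, 1]])  -> False (2 rows of length 3)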
def ReadFile(self, event):
self.listOfFile = list() # make sure the function starts from a clean state
self.isMatrixValide = True # reset to the initial value (important!)
if self.isFileSelected and (".txt" in self.filepath):
with open(self.filepath, "r") as f:
try:
li = [i for i in f.readlines()]
self.listOfFile = [[int(i) for i in li[j].split()] for j in range(len(li))]
for i in range(len(self.listOfFile)):
for j in range(len(self.listOfFile[i])):
if self.listOfFile[i][j] not in {1, 0}:
self.isMatrixValide = False
break # Not necessary to check all elements if one doesn't respect the condition
# Branch on whether the matrix is valid (contains only 1s and 0s)
if not self.isMatrixValide:
self.readMessage.configure(text="Please enter a matrix where it's elements are 1 or 0")
else:
if self.squaredMatrix(self.listOfFile):
self.readMessage.configure(text="Matrix Saved Successfully")
self.inputNextButton.show()
self.numVertices = len(self.listOfFile) # check if this line works !!
self.isFileRead = True
else:
self.readMessage.configure(text="Please enter a squared Matrix")
except ValueError:
messagebox.showerror("Error",
"Please Check if the matrix is valid\nand each element is separated by space")
else:
messagebox.showerror("Error", "Please Enter A valid Path to a text file")
self.titleOfFile.configure(text="No Path Selected..")
def InputBack(self, event):
self.frameRight1.place_forget()
self.frameRight.place(relx=0.2, rely=0, relwidth=0.8, relheight=1)
# global self.color_main_content
self.questionButton.ChangeBackgroundColor(self.color_main_content)
self.inputButton.ChangeBackgroundColor(self.color_menu)
self.var1.set(None)
self.var2.set(None)
self.graphLabel.place_forget()
self.pathLabel.place_forget()
def InputNext(self, event):
self.var3.set(None)
self.isMatrixOk = False
if self.optionInput == 1:
self.NextPage(event, self.frameRight1, self.frameRight4, self.inputButton, self.processingButton)
self.test = PathFindingBfs(self.pathCanvas, self.column, self.row, 525, 450)
elif self.optionInput == 2 or self.optionInput == 3:
if self.optionInput == 2:
self.isMatrixOk = False
else:
self.isMatrixOk = True
self.NextPage(event, self.frameRight1, self.frameRight2, self.inputButton, self.choiceButton)
else:
pass
## Beginning the methods of the third page => frameRight2
def OptionBack(self, event):
self.BackPage(event, self.frameRight1, self.frameRight2, self.inputButton, self.choiceButton)
def OptionNext(self, event):
if self.isMatrixOk and self.isNameOk:
self.NextPage(event, self.frameRight2, self.frameRight3, self.choiceButton,
self.processingButton) # change it to next
self.Graph = DrawGraph(self.drawGraphContainer, self.NameVertices,
self.listOfFile)
else:
messagebox.showerror("Error", "Please Check Your Entry")
def nameOption(self, num):
if num == 1:
list1 = list(range(1, self.numVertices + 1))
list1 = [str(i) for i in list1]
self.NameVertices = list1
self.matrixOfGraphLabel.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.3)
self.customizableNameOption.place_forget()
self.isNameOk = True
elif num == 2:
self.matrixOfGraphLabel.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.3)
self.customizableNameOption.place_forget()
if self.numVertices > len(self.alphabets):
messagebox.showerror("error", "Number Of Names surpassed the limit\nPlease choose other option")
self.isNameOk = False
else:
self.NameVertices = self.alphabets[0:self.numVertices]
self.isNameOk = True
else: # means num = 3
self.customizableNameOption.place(relx=0.1, rely=0.3, relwidth=0.8, relheight=0.55)
self.matrixOfGraphLabel.place_forget()
self.isNameOk = False
if self.isMatrixOk and self.isNameOk:
self.optionNextButton.show()
else:
self.optionNextButton.place_forget()
def listCheckLength(self, list1):
return all(len(element) < 3 for element in list1)
# validate names of vertices in a list called : self.NameVertices
def validateCustomizableName(self, event):
names = self.labelNameEntry.get()
if len(names) == 0 or names == self.text_name_entry:
self.labelNameMessage.configure(text="")
messagebox.showwarning("Warning", "Please Enter Valid Names")
self.labelNameEntry.insert(0, self.text_name_entry)
self.isNameOk = False
else:
list1 = names.split(",")
if not self.listCheckLength(list1):
self.labelNameMessage.configure(text="")
messagebox.showwarning("Warning", "Please make sure each name has 1 or 2 characters")
self.labelNameEntry.insert(0, names)
self.isNameOk = False
else:
if len(list1) != self.numVertices:
self.labelNameMessage.configure(text="")
messagebox.showwarning("Warning", "Number of vertices and names does not match")
self.labelNameEntry.insert(0, names)
self.isNameOk = False
else:
self.NameVertices = list1
self.labelNameMessage.configure(text="Names Saved Successfully")
self.isNameOk = True
if self.isMatrixOk and self.isNameOk:
self.optionNextButton.show()
else:
self.optionNextButton.place_forget()
# basically this function sets the list of names and a boolean flag (self.isNameOk)
# indicating whether the names are valid
def validateCustomizableMatrix(self, event, entry1, message): # was entry1, message
self.isMatrixValide = True
message.configure(text="")
entry = entry1.get()
if len(entry) == 0 or entry == self.text_matrix_1:
messagebox.showwarning("Error", "Please Enter A valid matrix")
entry1.configure(text=self.text_matrix_1)
else:
try:
splited = entry.split(",") # spliting the string to a list of rows
list1 = [list(i) for i in splited] # rows and columns but it's a string
for i in range(len(list1)):
for j in range(len(list1[i])):
list1[i][j] = int(list1[i][j])
# Start the testing on the list
if not self.squaredMatrix(list1):
self.isMatrixOk = False
message.configure(text="Please Enter a squared matrix")
elif len(list1) != self.numVertices:
self.isMatrixOk = False
messagebox.showwarning("Matrix Dimension Error",
f"Please Enter a matrix of {self.numVertices}x{self.numVertices}")
else:
for i in range(len(list1)):
for j in range(len(list1[i])):
if list1[i][j] not in {0, 1}:
self.isMatrixValide = False
break
if self.isMatrixValide:
self.listOfFile = list1
message.configure(text="Matrix Registered successfully")
self.isMatrixOk = True
else:
messagebox.showerror("Error", "All values of the matrix must be either 1 or 0")
self.isMatrixOk = False
except ValueError:
self.isMatrixOk = False
messagebox.showwarning("Error", "Please make sure that all elements are either 0 or 1")
if self.isMatrixOk and self.isNameOk:
self.optionNextButton.show()
else:
self.optionNextButton.place_forget()
## Beginning the methods of the third page => frameRight3
def ProcessingBack(self, event):
self.BackPage(event, self.frameRight2, self.frameRight3, self.choiceButton, self.processingButton)
def Exit(self, event):
self.master.destroy()
def validateAlgoPar(self, event):
start = self.GraphAlgorithmEntry1.get()
end = self.GraphAlgorithmEntry2.get()
if (start in
# Repository: earthlab/firedpy
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from collections import OrderedDict
import datetime as dt
import gc
import geopandas as gpd
from getpass import getpass
from glob import glob
from io import BytesIO
from multiprocessing import cpu_count, Pool
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pycurl
import rasterio
from rasterio import logging
from rasterio.merge import merge
import xarray as xr
import rioxarray as rxr
from shapely.geometry import Point, Polygon, MultiPolygon
import sys
from tqdm import tqdm
import requests
import warnings
import paramiko
# The python gdal issue (matching system gdal version)
try:
from osgeo import gdal, ogr, osr
except ImportError:
raise ImportError(""" Unfortunately, you still need to install GDAL for
Python. Try pip install `pygdal==version` where the
version matches the first three digits of the output from
the command `gdalinfo --version`. To see available pygdal
versions run `pip install pygdal==`
""")
# MODIS CRS retrieved from a single HDF file
outCRS = '''PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",
DATUM["Not specified (based on custom spheroid)",
SPHEROID["Custom spheroid",6371007.181,0]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]]],
PROJECTION["Sinusoidal"],
PARAMETER["longitude_of_center",0],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["Meter",1],
AXIS["Easting",EAST],AXIS["Northing",NORTH]]'''
# Suppress rasterio errors for now
log = logging.getLogger()
log.addFilter(rasterio.errors.NotGeoreferencedWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
pd.options.mode.chained_assignment = None
# Functions
def convertDates(array, year):
"""Convert everyday in an array to days since Jan 1 1970"""
def convertDate(julien_day, year):
base = dt.datetime(1970, 1, 1)
date = dt.datetime(year, 1, 1) + dt.timedelta(int(julien_day))
days = date - base
return days.days
# Loop through each position with data and convert
locs = np.where(array > 0)
ys = locs[0]
xs = locs[1]
locs = [[ys[i], xs[i]] for i in range(len(xs))]
for loc in locs:
y = loc[0]
x = loc[1]
array[y, x] = convertDate(array[y, x], year)
return array
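# Hedged example of the conversion above (made-up values): for year=2016 a
# burn-date value of 1 (Julian day 1) becomes the day count between 1970-01-01
# and 2016-01-02, i.e. 16802, while zero cells are left untouched.
#
#   arr = np.array([[0, 1], [0, 0]])
#   convertDates(arr, 2016)  # -> array([[0, 16802], [0, 0]])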
def dateRange(perimeter):
"""Converts days in a perimeter object since Jan 1 1970 to date strings"""
if len(perimeter.coords) > 0:
base = dt.datetime(1970, 1, 1)
days = [p[2] for p in perimeter.coords]
day1 = (base + dt.timedelta(days=int(min(days)))).strftime("%Y-%m-%d")
else:
day1 = "N/A"
return day1
def edgeCheck(yedges, xedges, coord, sp_buffer):
"""Identify edge cases to make merging events quicker later"""
y = coord[0]
x = coord[1]
if y in yedges:
edge = True
elif x in xedges:
edge = True
else:
edge = False
return edge
def flttn(lst):
"""Just a quick way to flatten lists of lists"""
lst = [l for sl in lst for l in sl]
return lst
def maxGrowthDate(x):
dates = x["date"].to_numpy()
pixels = x["pixels"].to_numpy()
loc = np.where(pixels == np.max(pixels))[0]
d = np.unique(dates[loc])[0]
# if len(d) > 1:
# d = ", ".join(d)
# else:
# d = d[0]
return d
def mergeChecker(new_coords, full_list, temporal_param, radius):
"""
This uses a radius for the spatial window as opposed to a square and is not
currently being used to merge events.
"""
t1 = np.min([c[2] for c in new_coords]) - temporal_param
t2 = np.max([c[2] for c in new_coords]) + temporal_param
for i in range(len(full_list)):
old_event = full_list[i]
old_coords = old_event[1]
old_times = [c[2] for c in old_coords]
time_checks = [t for t in old_times if t >= t1 and t <= t2]
if len(time_checks) > 0:
for coord in new_coords:
# Check if the time coordinate is within an old event
radii = []
new_y = coord[0]
new_x = coord[1]
for oc in old_coords:
old_y = oc[0]
old_x = oc[1]
dy = abs(old_y - new_y)
dx = abs(old_x - new_x)
r = np.sqrt((dy ** 2) + (dx ** 2))
radii.append(r)
check = [r for r in radii if r <= radius]
if any(check):
return i, True
else:
return i, False
else:
return i, False
def mode(lst):
return max(set(list(lst)), key=list(lst).count)
def pquery(p, lc, lc_array):
"""Find the landcover code for a particular point (p)."""
row, col = lc.index(p.x, p.y)
lc_value = lc_array[row, col]
return lc_value
def rasterize(src, dst, attribute, resolution, crs, extent, all_touch=False, na=-9999):
"""Rasterizes input vector data"""
# Open shapefile, retrieve the layer
src_data = ogr.Open(src)
layer = src_data.GetLayer()
# Use transform to derive coordinates and dimensions
xmin = extent[0]
ymin = extent[1]
xmax = extent[2]
ymax = extent[3]
# Create the target raster layer
cols = int((xmax - xmin)/resolution)
rows = int((ymax - ymin)/resolution) + 1
trgt = gdal.GetDriverByName("GTiff").Create(dst, cols, rows, 1,
gdal.GDT_Float32)
trgt.SetGeoTransform((xmin, resolution, 0, ymax, 0, -resolution))
# Add crs
refs = osr.SpatialReference()
refs.ImportFromWkt(crs)
trgt.SetProjection(refs.ExportToWkt())
# Set no value
band = trgt.GetRasterBand(1)
band.SetNoDataValue(na)
# Set options
if all_touch is True:
ops = ["-at", "ATTRIBUTE=" + attribute]
else:
ops = ["ATTRIBUTE=" + attribute]
# Finally rasterize
gdal.RasterizeLayer(trgt, [1], layer, options=ops)
# Close target and source rasters
del trgt
del src_data
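# Hedged usage sketch for rasterize(); the paths, attribute name and extent
# values are placeholders, and the module-level MODIS sinusoidal CRS string is
# reused for the output:
#
#   rasterize(src="shapefiles/ecoregion/ecoregions.shp",
#             dst="rasters/ecoregion/ecoregions.tif",
#             attribute="US_L3CODE", resolution=463.3127,
#             crs=outCRS, extent=[xmin, ymin, xmax, ymax])
#
# With all_touch=True every pixel touched by a polygon is burned with the
# attribute value; otherwise only pixels whose centers fall inside a geometry
# are written.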
def requestIO(url):
"""Function for setting IO request for data download"""
b = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEFUNCTION, b.write)
c.perform()
c.close()
content = b.getvalue()
return content
def spCheck(diffs, sp_buf):
"""Quick function to check if events land within the spatial window."""
checks = [e for e in diffs if abs(e) < sp_buf]
if any(checks):
check = True
else:
check = False
return check
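# Hedged example (illustrative values): with a spatial buffer of 5 pixels,
# offsets [-12, 3, 9] pass because |3| < 5, while [-12, 9] fail.
#
#   spCheck([-12, 3, 9], 5)  # -> True
#   spCheck([-12, 9], 5)     # -> False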
def toAcres(p, res):
return (p*res**2) * 0.000247105
def toHa(p, res):
return (p*res**2) * 0.0001
def toKms(p, res):
return (p*res**2)/1000000
def toDays(date, base):
"""Convert dates to days since a base date"""
if type(date) is str:
date = dt.datetime.strptime(date, "%Y-%m-%d")
delta = (date - base)
days = delta.days
return days
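# Hedged example for the unit helpers above, assuming the ~463.3127 m nominal
# MODIS pixel size (an assumption here, not read from the data):
#
#   res = 463.3127
#   toKms(100, res)   # -> ~21.5 km^2 for 100 pixels
#   toHa(100, res)    # -> ~2146.6 ha
#   toDays("2016-01-02", dt.datetime(1970, 1, 1))  # -> 16802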
def asMultiPolygon(polygon):
if type(polygon) == Polygon:
polygon = MultiPolygon([polygon])
return polygon
def get(self, remotepath, localpath=None):
"""Copies a file between the remote host and the local host."""
"""For Paramiko SSH/SFTP Client Access: fuoco.geog.umd.edu"""
if not localpath:
localpath = os.path.split(remotepath)[1]
self._sftp_connect()
self._sftp.get(remotepath, localpath)
def downloadLC(query, session):
"""Downloads MODIS land cover data"""
link = query[0]
dst = query[1]
try:
# submit the request using the session
response = session.get(link, stream=True)
# raise an exception in case of http errors
response.raise_for_status()
# save the file
with open(dst, 'wb') as fd:
fd.write(response.content)
except requests.exceptions.HTTPError as e:
# handle any errors here
print(e)
# Classes
class DataGetter:
"""
Things to do/remember:
- parallel downloads
"""
def __init__(self, proj_dir, start_yr, end_yr, username, password):
self.proj_dir = proj_dir
self.start_yr= start_yr
self.end_yr = end_yr
self.username = username
self.password = password
self.date = dt.datetime.today().strftime("%m-%d-%Y")
self.createPaths()
self.cpus = os.cpu_count()
self.modis_template_path = os.path.join(proj_dir, "rasters/")
self.modis_template_file_root = "mosaic_template.tif"
self.landcover_path = os.path.join(proj_dir, "rasters/landcover")
self.landcover_file_root = "lc_mosaic_"
self.modis_crs = ("+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs")
self.nc_path = os.path.join(proj_dir, "rasters/burn_area/netcdfs")
self.hdf_path = os.path.join(proj_dir, "rasters/burn_area/hdfs")
self.tiles = ["h08v04", "h09v04", "h10v04", "h11v04", "h12v04",
"h13v04", "h08v05", "h09v05", "h10v05", "h11v05",
"h12v05", "h08v06", "h09v06", "h10v06", "h11v06"]
print("Project Folder: " + proj_dir)
def createPaths(self):
sub_folders = ["rasters/burn_area", "rasters/burn_area/hdfs",
"rasters/ecoregion", "rasters/landcover",
"rasters/landcover/mosaics/", "shapefiles/ecoregion",
"tables"]
folders = [os.path.join(self.proj_dir, sf) for sf in sub_folders]
for f in folders:
if not os.path.exists(f):
os.makedirs(f)
def getBurns(self):
"""
This will download the MODIS burn event data set tiles and create a
singular mosaic to use as a template file for coordinate reference
information and geometries.
User manual:
http://modis-fire.umd.edu/files/MODIS_C6_BA_User_Guide_1.2.pdf
Update 02/2021 -> fuoco server transitioned to SFTP Dec 2020
Update firedpy to use Paramiko SSHClient / SFTPClient
Server-side changes are described in the user manual linked above
SFTP:
sftp://fire:[email protected]/gfed4/MCD64A1/C6/
username: fire
password: <PASSWORD>
"""
##################################################################################
# Check into the UMD SFTP fuoco server using Paramiko
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname="fuoco.geog.umd.edu", username="fire", password="<PASSWORD>")
print("Connected to 'fuoco.geog.umd.edu' ...")
# Open the connection to the SFTP
sftp_client = ssh_client.open_sftp()
##################################################################################
# Use the specified tiles, or download every available tile if "all" was requested
if self.tiles[0].lower() != "all":
tiles = self.tiles
else:
sftp_client.chdir('/data/MODIS/C6/MCD64A1/HDF')
dirs = sftp_client.listdir()
tiles = dirs
# print(tiles)
# Download the available files and catch failed downloads
for tile in tiles:
# Find remote folder for the tile
sftp_folder = '/data/MODIS/C6/MCD64A1/HDF/' + tile
# Check if remote folder exists and if not, continue to next tile
try:
# Change directory to remote folder
sftp_client.chdir(sftp_folder)
hdfs = sftp_client.listdir()
# hdfs = [h for h in hdfs if ".hdf" in h]
# Make sure local target folder exists
folder = os.path.join(self.hdf_path, tile)
if not os.path.exists(folder):
os.mkdir(folder)
# Skip this if the final product exists
nc_file = os.path.join(self.proj_dir,
"rasters/burn_area/netcdfs/" + tile + ".nc")
# ~~~~~~~~~~~~~~~~~~Download~~~~~~~~~~~~~~~~~~~~~~~~~~
if not os.path.exists(nc_file):
print("Downloading/Checking HDF files for: " + tile)
if self.start_yr and self.end_yr:
yrs= list(range(self.start_yr, | |
# Repository: Flipajs/FERDA
__author__ = 'simon'
import random
import matplotlib.colors as colors
import numpy as np
from PyQt4 import QtGui, QtCore
from skimage.transform import resize
from core.log import LogCategories, ActionNames
from core.region.region import Region
from gui.graph_widget.custom_line_selectable import Custom_Line_Selectable
from gui.graph_widget.pixmap_selectable import Pixmap_Selectable
from gui.img_controls.gui_utils import cvimg2qtpixmap
from gui.img_controls.my_scene import MyScene
from gui.plot.plot_chunks import PlotChunks
from gui.settings import Settings as S_
from gui.view.chunks_on_frame import ChunksOnFrame
from utils.drawing.points import draw_points_crop
# Maximum number of info boxes below the graph (capped at six)
BOX_NUM = 6
if BOX_NUM > 6:
BOX_NUM = 6
# Selectable opacity of background color in %
OPACITY = 60
# fixed height of buttons
HEIGHT = 70
# size of font used in labels (px)
FONT_SIZE = 13
# Margins of the workspace
M = 3
# Multiplier for the graph width
GRAPH_WIDTH = 1
SIMILARITY = 'sim'
STRONG = 's'
CONFIRMED = 'c'
MERGED = 'm'
SPLIT = 'split'
class NodeGraphVisualizer(QtGui.QWidget):
def __init__(self, solver, g, regions, chunks, node_size=30, show_in_visualize_callback=None, show_vertically=False):
super(NodeGraphVisualizer, self).__init__()
self.show_in_visualizer_callback = show_in_visualize_callback
self.solver = solver
self.project = solver.project
self.g = g
self.regions = regions
self.edges_obj = {}
self.nodes_obj = {}
self.pixmaps = {}
self.selected_edge = [[None, None], None]
self.show_frames_number = True
self.used_rows = {}
self.column_count = 1
self.frames = []
self.positions = {}
self.node_displayed = {}
self.node_size = node_size
self.y_step = self.node_size + 2
self.x_step = self.node_size + 200
self.availability = np.zeros(len(regions))
self.toggled = []
self.show_vertically = show_vertically
self.chunks = chunks
self.view = QtGui.QGraphicsView(self)
self.setLayout(QtGui.QVBoxLayout())
self.edge_info_layout = QtGui.QHBoxLayout()
self.node_info_layout = QtGui.QHBoxLayout()
self.edge_info_layout.setSpacing(M)
self.node_info_layout.setSpacing(M)
self.view.setMouseTracking(True)
self.scene = MyScene()
self.view.setScene(self.scene)
self.scene.clicked.connect(self.scene_clicked)
self.layout().addLayout(self.edge_info_layout)
self.layout().addWidget(self.view)
self.layout().addLayout(self.node_info_layout)
self.layout().setContentsMargins(M, M, M, M)
self.info_label_upper = QtGui.QLabel()
self.stylesheet_info_label = "font: bold %spx" % FONT_SIZE
self.info_label_upper.setStyleSheet(self.stylesheet_info_label)
self.info_label_upper.setText("Frame:\nCentroid:\nArea:")
self.info_label_upper.setFixedWidth(HEIGHT)
self.info_label_upper.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
self.edge_info_layout.addWidget(self.info_label_upper)
self.left_label = QtGui.QLabel()
self.edge_info_layout.addWidget(self.left_label)
self.chunk_label = QtGui.QLabel()
self.edge_info_layout.addWidget(self.chunk_label)
self.right_label = QtGui.QLabel()
self.edge_info_layout.addWidget(self.right_label)
stylesheet = "font: %spx; border-style: solid; border-radius: 25px; border-width: 1.5px" % FONT_SIZE
self.right_label.setStyleSheet(stylesheet)
self.left_label.setStyleSheet(stylesheet)
self.chunk_label.setStyleSheet(stylesheet)
self.left_label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.right_label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.chunk_label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.hide_button = QtGui.QPushButton("Hide (h)")
self.hide_button.setStyleSheet("background-color: grey; border-style:outset; border-radius: 25px; \
            border-width: 2px; border-color: beige; font: bold 14px; min-width:10em; padding: 6px")
self.hide_button.setFixedHeight(HEIGHT)
self.hide_button.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_H))
self.hide_button.clicked.connect(self.hide_button_function)
self.edge_info_layout.addWidget(self.hide_button)
self.aux_space_upper = QtGui.QLabel()
self.aux_space_upper.setFixedHeight(HEIGHT)
self.aux_space_upper.setFixedWidth(0)
self.upper_widgets = [self.info_label_upper, self.left_label, self.right_label,
self.chunk_label, self.hide_button, self.aux_space_upper]
self.widgets_hide(self.upper_widgets)
for label in self.upper_widgets:
label.setFixedHeight(HEIGHT)
self.plot_action = QtGui.QAction('plot', self)
self.plot_action.triggered.connect(self.plot_graph)
self.plot_action.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_P))
self.addAction(self.plot_action)
self.connect_action = QtGui.QAction('connect', self)
self.connect_action.triggered.connect(self.connect_chunks)
self.connect_action.setShortcut(S_.controls.global_view_join_chunks)
self.addAction(self.connect_action)
self.remove_action = QtGui.QAction('remove', self)
self.remove_action.triggered.connect(self.remove_chunk)
self.remove_action.setShortcut(S_.controls.remove_tracklet)
self.addAction(self.remove_action)
self.stop_following_action = QtGui.QAction('stop following', self)
self.stop_following_action.triggered.connect(self.stop_following)
self.stop_following_action.setShortcut(S_.controls.global_view_stop_following)
self.addAction(self.stop_following_action)
self.ignore_during_suggestions_action = QtGui.QAction('ignore during suggestion', self)
self.ignore_during_suggestions_action.triggered.connect(self.ignore_during_suggestions)
self.ignore_during_suggestions_action.setShortcut(S_.controls.ignore_case)
self.addAction(self.ignore_during_suggestions_action)
self.info_label_lower = QtGui.QLabel()
self.info_label_lower.setStyleSheet(self.stylesheet_info_label)
self.info_label_lower.setText("Frame:\nCentroid:\nArea:")
self.info_label_lower.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
self.info_label_lower.setFixedWidth(HEIGHT)
self.node_info_layout.addWidget(self.info_label_lower)
self.aux_space_lower = QtGui.QLabel()
self.aux_space_lower.setFixedHeight(HEIGHT)
self.aux_space_lower.setFixedWidth(0)
self.edge_info_layout.addWidget(self.aux_space_upper)
self.node_info_layout.addWidget(self.aux_space_lower)
self.auxes = [self.aux_space_upper, self.aux_space_lower]
self.widgets_hide(self.auxes)
self.boxes = []
self.box_aux_count = 0
for i in range(BOX_NUM):
label = QtGui.QLabel()
label.setAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
label.setFixedHeight(HEIGHT)
label.hide()
self.boxes.append([label, None, None, None])
self.node_info_layout.addWidget(label)
self.clear_all_button = QtGui.QPushButton("Clear All (x)")
self.clear_all_button.setStyleSheet("background-color: grey; border-style:outset; border-width: 2px;\
            border-color: beige; font: bold 14px; min-width:10em; border-radius: 25px; padding: 6px")
self.clear_all_button.setFixedHeight(HEIGHT)
self.clear_all_button.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_X))
self.clear_all_button.clicked.connect(self.clear_all_button_function)
self.clear_all_button.setFixedHeight(HEIGHT)
self.show_in_visualizer_action = QtGui.QAction('show in visualizer', self)
self.show_in_visualizer_action.triggered.connect(self.show_in_visualizer)
self.show_in_visualizer_action.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_V))
self.addAction(self.show_in_visualizer_action)
self.node_info_layout.addWidget(self.clear_all_button)
self.lower_widgets = [self.clear_all_button, self.info_label_lower, self.aux_space_lower]
self.widgets_hide(self.lower_widgets)
self.suggest_node = True
self.node_positions = {}
self.picked_node = None
self.ignored_nodes = {}
self.use_img_toggle = True
def stop_following(self):
self.picked_node = None
self.update_view()
def ignore_during_suggestions(self):
n1 = self.boxes[0][3]
self.ignored_nodes[n1] = True
self.picked_node = None
self.update_view()
def scene_clicked(self, click_pos):
item = self.scene.itemAt(click_pos)
self.selected_edge[1] = click_pos
# self.clear_all_button_function()
if not item:
self.widgets_hide(self.upper_widgets)
self.widgets_hide(self.lower_widgets)
for box in self.boxes:
box[0].hide()
for it in self.toggled:
self.scene.removeItem(it)
self.toggled = []
self.clear_all_button_function()
return
if isinstance(item, Custom_Line_Selectable):
e_ = self.edges_obj[item]
self.selected_edge[0] = e_
e = self.g[e_[0]][e_[1]]
try:
chunk = e["chunk_ref"]
except KeyError:
chunk = None
self.edge_labels_update(e_, chunk)
if isinstance(item, Pixmap_Selectable):
parent_pixmap = item.parent_pixmap
n_ = self.nodes_obj[parent_pixmap]
self.node_label_update(n_)
if self.use_img_toggle:
self.toggle_n(n_)
elif isinstance(item, QtGui.QGraphicsPixmapItem):
# toggled item...
return
else:
# self.clear_all_button_function()
self.suggest_node = False
self.update_view(None, None)
self.suggest_node = True
def node_label_update(self, node):
if self.info_label_lower.isHidden():
self.widgets_show(self.lower_widgets)
self.widgets_show(self.auxes)
if self.box_aux_count < BOX_NUM:
label = self.boxes[self.box_aux_count][0]
self.boxes[self.box_aux_count][3] = node
if self.box_aux_count > 0:
compare_n = self.boxes[0][3]
dist = round(np.linalg.norm(node.centroid() - compare_n.centroid()), 2)
label.setText(str(node.frame_ - compare_n.frame_) + "\n" +
str(dist) + "\n" + str(node.area()-compare_n.area()))
else:
c = node.centroid()
x = round(c[1], 2)
y = round(c[0], 2)
label.setText(str(node.frame_) + "\n" +
str(x) + ", " + str(y) + "\n" + str(Region.area(node)))
# if self.box_aux_count % 2 == 0:
self.boxes[self.box_aux_count][1] = color = random_hex_color_str()
# else:
# self.boxes[self.box_aux_count][1] = color = \
# inverted_hex_color_str(self.boxes[self.box_aux_count - 1][1])
color_alpha = hex2rgb_opacity_tuple(color)
stylesheet = "font: %spx; color: black; background-color: rgba%s; border-style: dashed; \
border-width: 1.5px; border-radius: 25px" % (str(FONT_SIZE), str(color_alpha))
label.setStyleSheet(stylesheet)
self.boxes[self.box_aux_count][0] = label
label.show()
self.store_border_color(color, node)
self.update_dashed_borders()
self.box_aux_count += 1
else:
self.box_aux_count = 0
for box in self.boxes:
box[0].hide()
box[2].setClipped(None)
self.scene.update()
self.node_label_update(node)
def store_border_color(self, color, node):
color_rgb = hex2rgb_tuple(color)
q_color = QtGui.QColor(color_rgb[0], color_rgb[1], color_rgb[2])
pixmap = self.pixmaps[node]
pixmap.setClipped(q_color)
self.boxes[self.box_aux_count][2] = pixmap
def update_dashed_borders(self):
if self.box_aux_count > 0:
label_changed = self.boxes[self.box_aux_count - 1][0]
color2 = self.boxes[self.box_aux_count - 1][1]
color2_alpha = hex2rgb_opacity_tuple(color2)
stylesheet = "font: %spx; color: black; background-color: rgba%s; border-style: solid; border-width: 1.5px; border-radius: 25px" \
% (str(FONT_SIZE), str(color2_alpha))
label_changed.setStyleSheet(stylesheet)
def edge_labels_update(self, edge, chunk):
self.left_label.setText(str(edge[0].frame_) + "\n" + str(edge[0].id_) + "\n" +
str(Region.centroid(edge[0])) + "\n" + str(Region.area(edge[0])))
if chunk is None:
text = "Not a chunk"
else:
text = "Chunk info:" + "\nSorted: " + str(chunk.is_sorted) + "\nReduced nodes: " + str(len(chunk.reduced))
self.chunk_label.setText(text)
self.right_label.setText(str(edge[1].frame_) + "\n" + str(edge[1].id_) + "\n" +
str(Region.centroid(edge[1])) + "\n" + str(Region.area(edge[1])))
self.widgets_show(self.upper_widgets)
self.widgets_show(self.auxes)
def widgets_hide(self, labels):
for label in labels:
if type(label) is list:
for l in label:
l.hide()
else:
label.hide()
def widgets_show(self, labels):
for label in labels:
if type(label) is list:
for l in label:
l.show()
else:
label.show()
def hide_button_function(self):
self.widgets_hide(self.upper_widgets)
if self.clear_all_button.isHidden():
self.widgets_hide(self.auxes)
def clear_all_button_function(self):
for box in self.boxes:
box[0].hide()
box[3] = None
if box[2] is not None:
box[2].setClipped(None)
# for it in self.toggled:
# # it.hide()
# self.scene.removeItem(it)
# del it
self.toggled = []
self.box_aux_count = 0
self.widgets_hide(self.lower_widgets)
if self.hide_button.isHidden():
self.widgets_hide(self.auxes)
def show_node(self, n, prev_pos=0):
self.node_displayed[n] = True
t = n.frame_
t_num = self.frames.index(t) * GRAPH_WIDTH
if n in self.positions:
pos = self.positions[n]
else:
pos = self.get_nearest_free_slot(t, prev_pos)
self.positions[n] = pos
vis = self.g.node[n]['img']
if vis.shape[0] > self.node_size or vis.shape[1] > self.node_size:
vis = np.asarray(resize(vis, (self.node_size, self.node_size)) * 255, dtype=np.uint8)
else:
z = np.zeros((self.node_size, self.node_size, 3), dtype=np.uint8)
z[0:vis.shape[0], 0:vis.shape[1]] = vis
vis = z
it = self.scene.addPixmap(cvimg2qtpixmap(vis))
x = self.x_step * t_num
y = self.y_step * pos
if self.show_vertically:
x, y = y, x
it.setPos(x, y)
self.node_positions[n] = (x, y)
self.nodes_obj[it] = n
it_ = Pixmap_Selectable(it, self.node_size)
self.pixmaps[n] = it_
def draw_edge_selectable(self, n1, n2):
t1 = n1.frame_
t2 = n2.frame_
t1_framenum = self.frames.index(t1) * GRAPH_WIDTH
t2_framenums = self.frames.index(t2) * GRAPH_WIDTH
from_x = self.x_step * t1_framenum + self.node_size
to_x = self.x_step * t2_framenums
from_y = self.y_step * self.positions[n1] + self.node_size / 2
to_y = self.y_step * self.positions[n2] + self.node_size / 2
if self.show_vertically:
from_x, from_y = from_y, from_x
to_x, to_y = to_y, to_x
line_ = QtCore.QLineF(from_x, from_y, to_x, to_y)
custom_line_ = Custom_Line_Selectable(line_)
self.scene.addItem(custom_line_)
self.edges_obj[custom_line_] = (n1, n2)
def prepare_positions(self, frames):
process_at_the_end = []
for f in frames:
for n1 in self.regions[f]:
if n1 not in self.g.node:
continue
if n1 in self.positions:
continue
is_ch, t_reversed, ch = self.solver.is_chunk(n1)
if t_reversed:
continue
n2 = ch.end_n
# for _, n2, d in self.g.out_edges(n1, data=True):
if n2 in self.positions:
continue
t1 = n1.frame_
t2 = n2.frame_
if t1 not in self.frames:
if t2 in self.frames:
process_at_the_end.append(n2)
continue
if t2 not in self.frames:
if t1 in self.frames:
process_at_the_end.append(n1)
continue
# t1_framenum = self.frames.index(t1) * GRAPH_WIDTH
# t2_framenum = self.frames.index(t2) * GRAPH_WIDTH
p1 = self.get_nearest_free_slot(t1, 0)
p2 = self.get_nearest_free_slot(t2, p1)
self.positions[n1] = p1
self.positions[n2] = p2
for t in range(t1, t2):
if t in self.used_rows:
self.used_rows[t][p1] = True
else:
self.used_rows[t] = {p1: True}
for n in process_at_the_end:
t = n.frame_
# t_framenum = self.frames.index(t) * GRAPH_WIDTH
p = self.get_nearest_free_slot(t, 0)
self.positions[n] = p
def get_nearest_free_slot(self, t, pos):
if t in self.used_rows:
step = 0
while True:
test_pos = pos - step
if test_pos > -1 and test_pos not in self.used_rows[t]:
self.used_rows[t][test_pos] = True
return test_pos
test_pos = pos + step
if test_pos not in self.used_rows[t]:
self.used_rows[t][test_pos] = True
return test_pos
step += 1
else:
self.used_rows[t] = {pos: True}
return pos
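    # Example (illustrative): with self.used_rows[t] == {0: True, 1: True},
    # get_nearest_free_slot(t, 0) probes 0 (taken), then -1 (rejected as
    # negative) and 1 (taken), and finally returns 2, marking row 2 as used
    # for frame t.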
def visualize(self):
self.positions = {}
self.used_rows = {}
k = np.array(list(self.regions.keys()))
self.frames = np.sort(k).tolist()
nodes_queue = []
visited = | |
0XF0, 0XC1, 0XE3, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE1, 0XE3, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE0, 0XC3, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE0, 0X03, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XF0, 0X07, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XF0, 0X07, 0XF8, 0X70, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XF8, 0X0F, 0X80, 0X00, 0X00, 0X07, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X1F, 0X80, 0X00, 0X00, 0X03, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X80, 0X00, 0X00, 0X01, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X80, 0X00, 0X00, 0X01, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X00, 0X03, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X00, 0X03, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X00, 0X03, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X00, 0X03, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X00, 0X03, 0XF8, 0X70, 0XE1, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFE, 0X3F, 0XFF, 0XF8, 0X00, 0X01, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X7F, 0XFF, 0XF8, 0X00, 0X01, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0XFF, 0XFF, 0XF8, 0X00, 0X01, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XF8, 0X00, 0X01, 0X81, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XF8, 0X00, 0X00, 0X01, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X03, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X7F, 0XFF, 0XFF, 0XFF, 0XFE, 0X03, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XFF, 0XFF, 0XFE, 0X0F, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X00, 0X03, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X00, 0X03, 0XFF, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X00, 0X03, 0XFF, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC3, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC3, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC3, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X1F, 0XC3, 0XFC, 0X3F, 0XEF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X07, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X70, 0X07, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0XE0, 0X03, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE0, 0X03, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE1, 0XC3, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XE3, 0XC3, 0XFC, 0X3F, 0XE1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XE7, 0XC3, 0XFC, 0X3F, 0XC1, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XC3, 0XC7, 0XC3, 0XE0, 0X00, 0X01, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X0F, 0XC3, 0XC0, 0X00, 0X01, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XC3, 0X80, 0X00, 0X03, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X00, 0X03, 0XC3, 0X80, 0X00, 0X07, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X00, 0X03, 0XC3, 0X00, 0X00, 0X1F, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X00, 0X03, 0XC2, 0X04, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X00, 0X03, 0XC2, 0X0C, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC0, 0X1C, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC0, 0X1C, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC0, 0X3C, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC0, 0X7C, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XC0, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XC0, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XC1, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XC3, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X00, 0X03, 0XFF, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X3F, 0XFF, 0XFF, 0XFC, 0X3F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X7F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0X7F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XC0, 0X7F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0X00, 0X1F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFE, 0X00, 0X0F, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFC, 0X00, 0X07, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X00, 0X07, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X63, 0X07, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XE3, 0XC3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XC3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XE3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XE3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF1, 0XE3, 0XE3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF0, 0XE3, 0XE3, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XFF,
0XFF, 0XFF, 0XFF, 0XFF, 0XFF, 0XF8, 0X03, 0XE3, 0XFF, 0XFF, | |
import sys
from abc import ABC, abstractmethod
from typing import List, Iterable, Callable, Tuple
from pygame.surface import Surface
from lib.action.event import (
mouse_position, game_event,
pump_event, pressed_mouse,
draw_ellipse, smooth_scale,
init_game
)
from lib.environment.color import GameColor
from lib.environment.font import GameFont
from lib.environment.control import Controls, GameControls
from lib.environment.screen import GameScreen, Screen
from lib.environment.image import Image, GameImage
from lib.environment.shape import Rectangle
from lib.snake.food import Apple, Food
from lib.snake.snake import Snake, SnakeEntity
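# Typical entry point (a sketch only; the real launcher is outside this excerpt,
# and the exact init_game() signature is assumed):
#
#   if __name__ == '__main__':
#       init_game()                  # initialise pygame via the lib.action.event wrapper
#       StartGameMenu().mainloop()   # show the start menu, which launches PySnakeGame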
class Engine(ABC):
"""Abstract interface for a game."""
@abstractmethod
def prepare_canvas(self) -> None:
pass
@abstractmethod
def terminate(self) -> None:
pass
@abstractmethod
def make_button(self, pos, text, color, action, text_size=20) -> None:
pass
@abstractmethod
def reset(self) -> None:
pass
@abstractmethod
def loop(self) -> None:
pass
class Menu(ABC):
"""Abstract interface for a menu game."""
@abstractmethod
def prepare_canvas(self) -> None:
pass
@abstractmethod
def make_text(self, x: int, y: int, text: str, size: int=20, color: Tuple=(0, 0, 0), center: bool=False) -> None:
pass
@abstractmethod
def make_button(self, loc: Iterable, text: str, color: Iterable, action: Callable, size: int=20) -> None:
pass
@abstractmethod
def mainloop(self) -> None:
pass
@abstractmethod
def start_up(self) -> None:
pass
@abstractmethod
def small(self) -> None:
pass
@abstractmethod
def huge(self) -> None:
pass
@abstractmethod
def easy(self) -> None:
pass
@abstractmethod
def normal(self) -> None:
pass
@abstractmethod
def advanced(self) -> None:
pass
@abstractmethod
def expert(self) -> None:
pass
@abstractmethod
def quit(self) -> None:
pass
class Game(ABC):
"""Abstract interface for a game."""
@abstractmethod
def start(self, speed: float, size: int) -> None:
pass
@abstractmethod
def restart(self) -> None:
pass
@abstractmethod
def launch_main_menu(self) -> None:
pass
class Buttons(ABC):
"""Buttons abstract interface."""
@abstractmethod
def elements(self) -> Iterable:
pass
@abstractmethod
def start_up(self) -> None:
pass
@abstractmethod
def small(self) -> None:
pass
@abstractmethod
def huge(self) -> None:
pass
class SnakeGameEngine(Engine):
"""Snake game engine."""
def __init__(self, speed: float, size: int = 1) -> None:
self._screen: Screen = GameScreen()
self._snake: Snake = SnakeEntity(speed, size)
self._controls: Controls = GameControls()
self._blocks: List = []
self._score: int = 0
self._size: int = size
self._left, self._right, self._up, self._down = False, False, False, False
self._hover: bool = False
self._click: bool = False
self._button_click = None
self._apple: Food = Apple(size)
self.prepare_canvas()
def prepare_canvas(self) -> None:
for pict in range(0, 800, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [pict, 0]])
for pict in range(0, 800, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [pict, 440]])
for pict in range(0, 450, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [0, pict]])
for pict in range(0, 450, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [790, pict]])
def terminate(self) -> None:
while True:
for event in game_event():
if event.type == self._controls.type().quit():
sys.exit()
for block in self._blocks:
self._screen.blit(block[0], block[1])
text = GameFont('Courier New', 50, 'Game Over Score:', True, (255, 255, 255)).render()
txt_rect = text.get_rect()
txt_rect.topleft = (20, 150)
self._screen.blit(text, txt_rect)
text = GameFont('Courier New', 50, str(self._snake.score()), True, (255, 255, 255)).render()
txt_rect = text.get_rect()
txt_rect.topleft = (600, 150)
self._screen.blit(text, txt_rect)
self._screen.update()
self.make_button((153, 300, 100, 50),
'Restart', [(255, 255, 255), (150, 150, 150)],
action=PySnakeGame().restart)
if self._hover:
pump_event()
click: List = pressed_mouse()
if click[0] == 1:
self._click = True
if self._click:
if click[0] == 0:
self._button_click()
self._click = False
def make_button(self, loc: Iterable, text: str, color: Iterable, action: Callable, size=20):
mouse = mouse_position()
old_loc = loc
rect = Rectangle(loc).shape()
rect.topleft = 0, 0
rectangle: Surface = GameImage(rect.size, self._controls.type().src_alpha()).surface()
circle: Surface = GameImage([min(rect.size) * 3] * 2, self._controls.type().src_alpha()).surface()
draw_ellipse(circle, (0, 0, 0), 0)
circle = smooth_scale(circle, rect)
radius = rectangle.blit(circle, (0, 0))
radius.bottomright = rect.bottomright
rectangle.blit(circle, radius)
radius.topright = rect.topright
rectangle.blit(circle, radius)
radius.bottomleft = rect.bottomleft
rectangle.blit(circle, radius)
rectangle.fill((0, 0, 0), rect.inflate(-radius.w, 0))
rectangle.fill((0, 0, 0), rect.inflate(0, -radius.h))
loc = old_loc
if (loc[0] + loc[2]) > mouse[0] > loc[0] and (loc[1] + loc[3]) > mouse[1] > loc[1]:
self._hover = True
self._button_click = action
color = GameColor(color[1]).get()
alpha = color.a
color.a = 0
else:
color = GameColor(color[0]).get()
alpha = color.a
color.a = 0
self._hover = False
rectangle.fill(color, special_flags=self._controls.type().blend_rgba_max())
rectangle.fill((255, 255, 255, alpha), special_flags=self._controls.type().blend_rgba_min())
self._screen.blit(rectangle, loc)
txt = GameFont('Courier New', size, text, True, (0, 0, 0)).render()
txt_rect = txt.get_rect()
txt_rect.center = (loc[0] + loc[2] / 2), (loc[1] + loc[3] / 2)
self._screen.blit(txt, txt_rect)
def reset(self) -> None:
self._left, self._right, self._up, self._down = False, False, False, False
def loop(self) -> None:
while True:
self._screen.fill((35, 38, 117))
self._snake.update()
for block in self._blocks:
if self._snake.verify_snake(block[1]):
self.terminate()
self._screen.blit(block[0], block[1])
counter: int = 0
for block in self._snake.images():
if counter != 0:
if self._snake.verify_apple(block[1]):
self.terminate()
self._screen.blit(block[0], block[1])
counter += 1
if self._snake.verify_apple(self._apple.location()):
self._snake.put_apple()
del self._apple
self._apple: Food = Apple(self._size)
self._screen.blit(self._apple.image(), self._apple.location())
self._screen.blit(self._snake.image(), self._snake.location())
for event in game_event():
if event.type == self._controls.type().quit():
sys.exit(0)
elif event.type == self._controls.type().down():
if event.key == self._controls.key().escape():
sys.exit(0)
if event.key == self._controls.key().right():
if not self._left:
self.reset()
self._snake.move_right()
self._right = True
if event.key == self._controls.key().left():
if not self._right:
self.reset()
self._snake.move_left()
self._left = True
if event.key == self._controls.key().up():
if not self._down:
self.reset()
self._snake.move_up()
self._up = True
if event.key == self._controls.key().down():
if not self._up:
self.reset()
self._snake.move_down()
self._down = True
self._screen.update()
class StartGameMenu(Menu):
"""Start game menu interface."""
def __init__(self):
self._game: Game = PySnakeGame()
self._controls: Controls = GameControls()
self._screen: Screen = GameScreen()
self._run_button = '(150, 300,100,50),"Run", [(0,255,0), (0,150,0)], action = self.start_up'
self._quit_button = '(550, 300,100,50),"Quit", [(255,0,0), (150,0,0)], action = self.quit'
self._hard_button, self._expert_button = None, None
self._buttons = [self._run_button, self._quit_button]
self._button_click = None
self._hover = None
self._blocks = []
self._size = 1
self._click, self._loads = False, False
self.prepare_canvas()
def prepare_canvas(self) -> None:
for pict in range(0, 800, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [pict, 0]])
for pict in range(0, 800, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [pict, 440]])
for pict in range(0, 450, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [0, pict]])
for pict in range(0, 450, 10):
image: Image = GameImage(location=(10, 10))
image.fill(color=(0, 0, 0))
self._blocks.append([image.surface(), [790, pict]])
def make_text(self, x: int, y: int, text: str, size: int=20, color: Tuple=(0, 0, 0), center: bool=False) -> None:
txts = GameFont('Courier New', size, text, True, color).render()
txtrect = txts.get_rect()
txtrect.topleft = x, y
if center:
txtrect.center = x, y
self._screen.blit(txts, txtrect)
def make_button(self, loc: Iterable, text: str, color: Iterable, action: Callable, size: int=20) -> None:
mouse = mouse_position()
oldpos = loc
rect = Rectangle(loc).shape()
rect.topleft = 0, 0
rectangle: Surface = GameImage(rect.size, self._controls.type().src_alpha()).surface()
circle: Surface = GameImage([min(rect.size) * 3] * 2, self._controls.type().src_alpha()).surface()
draw_ellipse(circle, (0, 0, 0), 0)
circle = smooth_scale(circle, rect)
radius = rectangle.blit(circle, (0, 0))
radius.bottomright = rect.bottomright
rectangle.blit(circle, radius)
radius.topright = rect.topright
rectangle.blit(circle, radius)
radius.bottomleft = rect.bottomleft
rectangle.blit(circle, radius)
rectangle.fill((0, 0, 0), rect.inflate(-radius.w, 0))
rectangle.fill((0, 0, 0), rect.inflate(0, -radius.h))
loc = oldpos
if (loc[0] + loc[2]) > mouse[0] > loc[0] and (loc[1] + loc[3]) > mouse[1] > loc[1]:
self._hover = True
self._button_click = action
color = GameColor(color[1]).get()
alpha = color.a
color.a = 0
else:
color = GameColor(color[0]).get()
alpha = color.a
color.a = 0
self._hover = False
rectangle.fill(color, special_flags=self._controls.type().blend_rgba_max())
rectangle.fill((255, 255, 255, alpha), special_flags=self._controls.type().blend_rgba_min())
self._screen.blit(rectangle, loc)
self.make_text((loc[0] + loc[2] / 2), (loc[1] + loc[3] / 2), text, center=True, size=size)
def mainloop(self) -> None:
while True:
self._screen.fill((35, 38, 117))
self.make_text(400, 150, 'PySnake Game', color=(255, 255, 255), size=80, center=True)
for event in game_event():
if event.type == self._controls.type().quit():
sys.exit()
for block in self._blocks:
self._screen.blit(block[0], block[1])
for button in self._buttons:
exec('self.make_button({})'.format(button))
if self._hover:
pump_event()
click = pressed_mouse()
if click[0] == 1:
self._click = True
if self._click:
if click[0] == 0:
self._button_click()
self._click = False
self._screen.update()
def start_up(self) -> None:
self._run_button: str = '(150,300,100,50),"Small", [(0,255,0), (0,150,0)], self.small'
self._quit_button: str = '(550,300,100,50),"Huge", [(0,255,0), (0,150,0)], self.huge'
self._buttons: List = [self._run_button, self._quit_button]
def small(self) -> None:
self._run_button: str = '(150,300,100,50),"Easy", [(0,255,0), (0,150,0)], self.easy'
self._quit_button: str = '(283,300,100,50),"Normal", [(0,255,0), (0,150,0)], self.normal'
self._hard_button: str = '(417,300,100,50),"Advanced", [(0,255,0), (0,150,0)], self.advanced'
self._expert_button: str = '(550,300,100,50),"Expert", [(0,255,0), (0,150,0)], self.expert'
self._buttons: List = [self._run_button, self._quit_button, self._hard_button, self._expert_button]
def huge(self) -> None:
self._size = 2
self._run_button: str = '(150,300,100,50),"Easy", [(0,255,0), (0,150,0)], self.easy'
self._quit_button: str = '(283,300,100,50),"Normal", [(0,255,0), (0,150,0)], self.normal'
self._hard_button: str = '(417,300,100,50),"Advanced", [(0,255,0), (0,150,0)], self.advanced'
self._expert_button: str = '(550,300,100,50),"Expert", [(0,255,0), (0,150,0)], self.expert'
self._buttons: List = [self._run_button, self._quit_button, self._hard_button, self._expert_button]
def easy(self) -> None:
self._game.start(1, self._size)
def normal(self) | |
# Repository: l2tor/underworlds
import uuid
import time
import logging;logger = logging.getLogger("underworlds.server")
from underworlds.types import *
import underworlds_pb2 as gRPC
from grpc.beta import interfaces as beta_interfaces
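# Serving sketch (assumes the standard grpc.beta code-generation naming for a
# service called "Underworlds"; the actual entry point is not shown in this file,
# and the port number is hypothetical):
#
#   server = gRPC.beta_create_Underworlds_server(Server())
#   server.add_insecure_port("[::]:50051")
#   server.start()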
class Server(gRPC.BetaUnderworldsServicer):
def __init__(self):
self._worlds = {}
# for each world (key), stores a mapping {client: list of node IDs that
# are to be invalidated}
self._node_invalidations = {}
self._timeline_invalidations = {}
        self._clients = {} # for each client, stores the links (cf. client types) with the various worlds.
self._clientnames = {}
# meshes are stored as a dictionary:
# - the key is a unique ID
        # - the value is a dictionary with these keys:
# - vertices: [(x,y,z), ...]
# - faces: [(i1, i2, i3), ...] with i an index in the vertices list
# - normals
self.meshes = {}
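        # Illustrative example of the documented layout (note that pushMesh()
        # below actually stores the received gRPC mesh message as-is):
        #   self.meshes["9f1c..."] = {"vertices": [(0, 0, 0), (1, 0, 0), (0, 1, 0)],
        #                             "faces": [(0, 1, 2)],
        #                             "normals": [(0, 0, 1)] * 3}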
self.starttime = time.time()
def _new_client(self, id, name):
self._clients[id] = {}
self._clientnames[id] = name
logger.info("New client <%s> has connected." % name)
def _clientname(self, id):
return self._clientnames.get(id, id)
def _new_world(self, name):
self._worlds[name] = World(name)
self._node_invalidations[name] = {}
self._timeline_invalidations[name] = {}
def _get_scene_timeline(self, ctxt):
world = ctxt.world
if world not in self._worlds:
self._new_world(world)
logger.info("<%s> created a new world <%s>" % (self._clientname(ctxt.client),
world))
scene = self._worlds[world].scene
timeline = self._worlds[world].timeline
return scene, timeline
def _update_current_links(self, client, world, type):
if world in self._clients[client]:
current_type = self._clients[client][world][0]
# update only if the current link is 'READER' (we do not
# want a 'READER' to overwrite a 'PROVIDER' for instance)
type = type if current_type == READER else current_type
self._clients[client][world] = (type, time.time())
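        # Example (illustrative): a client that first reads world "w" is stored
        # as (READER, t0); if it later pushes a node it is promoted to
        # (PROVIDER, t1), and subsequent reads keep PROVIDER rather than
        # demoting it back to READER.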
def _update_node(self, scene, node):
parent_has_changed = False
node.last_update = time.time()
oldnode = scene.node(node.id)
if oldnode: # the node already exist
parent_has_changed = oldnode.parent != node.parent
# update the list of children
node.children = [n.id for n in scene.nodes if n.parent == node.id]
# replace the node
scene.nodes = [node if old == node else old for old in scene.nodes]
action = gRPC.NodeInvalidation.UPDATE
else: # new node
scene.nodes.append(node)
if node.parent:
parent_has_changed = True
action = gRPC.NodeInvalidation.NEW
return action, parent_has_changed
def _delete_node(self, scene, id):
scene.nodes.remove(scene.node(id))
def _add_node_invalidation(self, world, node_id, invalidation_type):
for client_id in self._node_invalidations[world].keys():
self._node_invalidations[world][client_id].append(gRPC.NodeInvalidation(type=invalidation_type, id=node_id))
def _add_timeline_invalidation(self, world, sit_id, invalidation_type):
for client_id in self._timeline_invalidations[world].keys():
self._timeline_invalidations[world][client_id].append(gRPC.TimelineInvalidation(type=invalidation_type, id=sit_id))
#############################################
############ Underworlds API ################
############ GENERAL
def helo(self, client, context):
client_id = str(uuid.uuid4())
logger.debug("Got <helo> from %s" % client_id)
self._new_client(client_id, client.name)
res = gRPC.Client(id=client_id)
logger.debug("<helo> completed")
return res
def uptime(self, client, context):
logger.debug("Got <uptime> from %s" % client.id)
res = gRPC.Time(time=time.time() - self.starttime)
logger.debug("<uptime> completed")
return res
def topology(self, client, context):
logger.debug("Got <topology> from %s" % client.id)
topo = gRPC.Topology()
for w in self._worlds.keys():
topo.worlds.append(w)
for client_id, links in self._clients.items():
client = topo.clients.add()
client.id = client_id
client.name = self._clientname(client_id)
for w, details in links.items():
type, timestamp = details
interaction = client.links.add()
interaction.world = w
interaction.type = type
interaction.last_activity.time = timestamp
logger.debug("<topology> completed")
return topo
def reset(self, client, context):
logger.debug("Got <reset> from %s" % client.id)
logger.warning("Resetting Underworlds upon client <%s> request" % client.id)
logger.warning("This might break other clients!")
self._worlds = {}
self._node_invalidations = {}
self._timeline_invalidations = {}
logger.debug("<reset> completed")
return gRPC.Empty()
############ NODES
def getNodesLen(self, ctxt, context):
logger.debug("Got <getNodesLen> from %s" % ctxt.client)
self._update_current_links(ctxt.client, ctxt.world, READER)
scene,_ = self._get_scene_timeline(ctxt)
res = gRPC.Size(size=len(scene.nodes))
logger.debug("<getNodesLen> completed")
return res
def getNodesIds(self, ctxt, context):
logger.debug("Got <getNodesIds> from %s" % ctxt.client)
self._update_current_links(ctxt.client, ctxt.world, READER)
scene,_ = self._get_scene_timeline(ctxt)
nodes = gRPC.Nodes()
for n in scene.nodes:
nodes.ids.append(n.id)
logger.debug("<getNodesIds> completed")
return nodes
def getRootNode(self, ctxt, context):
logger.debug("Got <getRootNode> from %s" % ctxt.client)
self._update_current_links(ctxt.client, ctxt.world, READER)
scene,_ = self._get_scene_timeline(ctxt)
res = gRPC.Node(id=scene.rootnode.id)
logger.debug("<getRootNode> completed")
return res
def getNode(self, nodeInCtxt, context):
logger.debug("Got <getNode> from %s" % nodeInCtxt.context.client)
self._update_current_links(nodeInCtxt.context.client, nodeInCtxt.context.world, READER)
client_id, world = nodeInCtxt.context.client, nodeInCtxt.context.world
scene,_ = self._get_scene_timeline(nodeInCtxt.context)
self._update_current_links(client_id, world, READER)
node = scene.node(nodeInCtxt.node.id)
if not node:
logger.warning("%s has required an non-existant "
"node <%s> in world %s" % (self._clientname(client_id), nodeInCtxt.node.id, world))
context.details("Node <%s> does not exist in world %s" % (nodeInCtxt.node.id, world))
context.code(beta_interfaces.StatusCode.UNKNOWN)
else:
res = node.serialize(gRPC.Node)
logger.debug("<getNode> completed")
return res
def updateNode(self, nodeInCtxt, context):
logger.debug("Got <updateNode> from %s" % nodeInCtxt.context.client)
self._update_current_links(nodeInCtxt.context.client, nodeInCtxt.context.world, PROVIDER)
client_id, world = nodeInCtxt.context.client, nodeInCtxt.context.world
scene,_ = self._get_scene_timeline(nodeInCtxt.context)
node = Node.deserialize(nodeInCtxt.node)
invalidation_type, parent_has_changed = self._update_node(scene, node)
logger.info("<%s> %s node <%s> in world <%s>" % \
(self._clientname(client_id),
"updated" if invalidation_type==gRPC.NodeInvalidation.UPDATE else ("created" if invalidation_type==gRPC.NodeInvalidation.NEW else "deleted"),
repr(node),
world))
logger.debug("Adding invalidation action [" + str(invalidation_type) + "]")
self._add_node_invalidation(world, node.id, invalidation_type)
## If necessary, update the node hierarchy
if parent_has_changed:
parent = scene.node(node.parent)
if parent is None:
logger.warning("Node %s references a non-exisiting parent" % node)
elif node.id not in parent.children:
parent.children.append(node.id)
# tells everyone about the change to the parent
logger.debug("Adding invalidation action [update " + parent.id + "] due to hierarchy update")
self._add_node_invalidation(world, parent.id, gRPC.NodeInvalidation.UPDATE)
                # As a node has only one parent, if the parent has changed we must
                # remove our node from its previous parent's list of children
for othernode in scene.nodes:
if othernode.id != parent.id and node.id in othernode.children:
othernode.children.remove(node.id)
# tells everyone about the change to the former parent
logger.debug("Adding invalidation action [update " + othernode.id + "] due to hierarchy update")
self._add_node_invalidation(world, othernode.id, gRPC.NodeInvalidation.UPDATE)
break
logger.debug("<updateNode> completed")
return gRPC.Empty()
def deleteNode(self, nodeInCtxt, context):
logger.debug("Got <deleteNode> from %s" % nodeInCtxt.context.client)
self._update_current_links(nodeInCtxt.context.client, nodeInCtxt.context.world, PROVIDER)
client_id, world = nodeInCtxt.context.client, nodeInCtxt.context.world
scene,_ = self._get_scene_timeline(nodeInCtxt.context)
node = scene.node(nodeInCtxt.node.id)
logger.info("<%s> deleted node <%s> in world <%s>" % \
(self._clientname(client_id),
repr(node),
world))
action = self._delete_node(scene, nodeInCtxt.node.id)
# tells everyone about the change
logger.debug("Sent invalidation action [delete]")
self._add_node_invalidation(world, nodeInCtxt.node.id, gRPC.NodeInvalidation.DELETE)
        # Also remove the node from its parent's list of children
parent = scene.node(node.parent)
if parent:
parent.children.remove(node.id)
# tells everyone about the change to the parent
logger.debug("Sent invalidation action [update " + parent.id + "] due to hierarchy update")
self._add_node_invalidation(world, parent.id, gRPC.NodeInvalidation.UPDATE)
logger.debug("<updateNode> completed")
return gRPC.Empty()
#### Nodes invalidation streams
def getNodeInvalidations(self, ctxt, context):
""" For each pair (world, client), check if nodes need to be
invalidated, and yield accordingly the invalidation messages.
"""
world, client = ctxt.world, ctxt.client
# (if this client is not yet monitoring this world, add it as a side effect of the test)
if self._node_invalidations[world].setdefault(client,[]):
for invalidation in self._node_invalidations[world][client]:
yield invalidation
self._node_invalidations[world][client] = []
#try:
#except Exception as e:
# context.details("Exception in getInvalidations: %s" %repr(e))
# context.code(beta_interfaces.StatusCode.UNKNOWN)
############ TIMELINES
def timelineOrigin(self, ctxt, context):
logger.debug("Got <timelineOrigin> from %s" % ctxt.client)
self._update_current_links(ctxt.client, ctxt.world, READER)
_,timeline = self._get_scene_timeline(ctxt)
res = gRPC.Time(time=timeline.origin)
logger.debug("<timelineOrigin> completed")
return res
def event(self, sitInCtxt, context):
client, world = sitInCtxt.context.client, sitInCtxt.context.world
logger.debug("Got <event> from %s" % client)
self._update_current_links(client, world, PROVIDER)
_, timeline = self._get_scene_timeline(sitInCtxt.context)
sit = Situation.deserialize(sitInCtxt.situation)
        if timeline.situation(sit.id): # the situation already exists. Error!
raise Exception("Attempting to add twice the same situation!")
timeline.event(sit)
self._add_timeline_invalidation(world, sit.id, gRPC.TimelineInvalidation.EVENT)
logger.debug("<event> completed")
return gRPC.Empty()
def startSituation(self, sitInCtxt, context):
client, world = sitInCtxt.context.client, sitInCtxt.context.world
logger.debug("Got <startSituation> from %s" % client)
self._update_current_links(client, world, PROVIDER)
_, timeline = self._get_scene_timeline(sitInCtxt.context)
sit = Situation.deserialize(sitInCtxt.situation)
        if timeline.situation(sit.id): # the situation already exists. Error!
raise Exception("Attempting to add twice the same situation!")
timeline.start(sit)
self._add_timeline_invalidation(world, sit.id, gRPC.TimelineInvalidation.START)
logger.debug("<startSituation> completed")
return gRPC.Empty()
def endSituation(self, sitInCtxt, context):
client, world = sitInCtxt.context.client, sitInCtxt.context.world
logger.debug("Got <endSituation> from %s" % client)
self._update_current_links(client, world, PROVIDER)
_, timeline = self._get_scene_timeline(sitInCtxt.context)
sit = timeline.situation(sitInCtxt.situation.id)
if not sit:
raise Exception("Attempting to end a non-existant situation!")
timeline.end(sit)
self._add_timeline_invalidation(world, sit.id, gRPC.TimelineInvalidation.END)
logger.debug("<endSituation> completed")
return gRPC.Empty()
#### Timeline invalidation streams
def getTimelineInvalidations(self, ctxt, context):
""" For each pair (world, client), check if situations need to be
invalidated, and yield accordingly the invalidation messages.
"""
world, client = ctxt.world, ctxt.client
# (if this client is not yet monitoring this world, add it as a side effect of the test)
if self._timeline_invalidations[world].setdefault(client,[]):
for invalidation in self._timeline_invalidations[world][client]:
yield invalidation
self._timeline_invalidations[world][client] = []
############ MESHES
def hasMesh(self, meshInCtxt, context):
logger.debug("Got <hasMesh> from %s" % meshInCtxt.client.id)
res = gRPC.Bool(value=(meshInCtxt.mesh.id in self.meshes))
logger.debug("<hasMesh> completed")
return res
def getMesh(self, meshInCtxt, context):
logger.debug("Got <getMesh> from %s" % meshInCtxt.client.id)
logger.debug("<getMesh> completed")
return self.meshes[meshInCtxt.mesh.id]
def pushMesh(self, meshInCtxt, context):
logger.debug("Got <pushMesh> from %s" % meshInCtxt.client.id)
mesh_id = meshInCtxt.mesh.id
self.meshes[mesh_id] = meshInCtxt.mesh
logger.info("<%s> added a new mesh ID %s (%d faces)" % \
(self._clientname(meshInCtxt.client.id),
| |
2, so this verifies that reversed() still turns this into an
# iterator.
assert isinstance(act_keys_iter, Iterator)
act_keys = list(act_keys_iter)
assert act_keys == exp_keys
TESTCASES_DICTVIEW_GET = [
# Testcases for DictView.get()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * obj: DictView object to be used for the test.
# * key: Key to be used for the test.
# * default: Default value to be used for the test, or _OMIT_ARG.
# * exp_value: Expected value at the key.
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
# Empty DictView
(
"Empty dict, with None key",
dict(
obj=DictView({}),
key=None,
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Empty dict, with integer key",
dict(
obj=DictView({}),
key=1234,
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Empty dict, with empty string key (defaulted without default)",
dict(
obj=DictView({}),
key='',
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Empty dict, with empty string key (defaulted to a value)",
dict(
obj=DictView({}),
key='',
default='Newbie',
exp_value='Newbie',
),
None, None, True
),
(
"Empty dict, with non-empty key (defaulted without default)",
dict(
obj=DictView({}),
key='Dog',
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Empty dict, with non-empty key (defaulted to a value)",
dict(
obj=DictView({}),
key='Dog',
default='Kitten',
exp_value='Kitten',
),
None, None, True
),
# Non-empty DictView
(
"Non-empty dict, with None key",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key=None,
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Non-empty dict, with empty string key (defaulted without default)",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key='',
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Non-empty dict, with empty string key (defaulted to a value)",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key='',
default='Newbie',
exp_value='Newbie',
),
None, None, True
),
(
"Non-empty dict, with non-empty non-existing key (defaulted without "
"default)",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key='invalid',
default=_OMIT_ARG,
exp_value=None,
),
None, None, True
),
(
"Non-empty dict, with non-empty non-existing key (defaulted to a "
"value)",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key='invalid',
default='Newbie',
exp_value='Newbie',
),
None, None, True
),
(
"Non-empty dict, with existing key (no default)",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
key='Dog',
default=_OMIT_ARG,
exp_value='Cat',
),
None, None, True
),
]
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_GET)
@simplified_test_function
def test_DictView_get(testcase, obj, key, default, exp_value):
"""
Test function for DictView.get()
"""
# The code to be tested
if default is _OMIT_ARG:
act_value = obj.get(key)
else:
act_value = obj.get(key, default)
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
assert act_value == exp_value, \
"Unexpected value at key {k!r} with default {d}". \
format(k=key, d="omitted" if default is _OMIT_ARG else repr(default))
TESTCASES_DICTVIEW_ITEMS = [
# Testcases for DictView.keys(), values(), items()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * obj: DictView object to be used for the test.
# * exp_items: List with expected items (key,value) in expected order.
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"Empty dict",
dict(
obj=DictView({}),
exp_items=[],
),
None, None, True
),
(
"Dict with one item",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat')])),
exp_items=[('Dog', 'Cat')],
),
None, None, True
),
(
"Dict with two items",
dict(
obj=DictView(OrderedDict([('Dog', 'Cat'), ('Budgie', 'Fish')])),
exp_items=[('Dog', 'Cat'), ('Budgie', 'Fish')],
),
None, None, True
),
]
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_keys(testcase, obj, exp_items):
"""
Test function for DictView.keys()
"""
# The code to be tested
act_keys = obj.keys()
# Also test iterating through the result
act_keys_list = list(act_keys)
# Test that second iteration is possible
act_keys_list2 = list(act_keys)
if not PY2:
        # Test __contains__() of the returned view
for key in act_keys_list:
assert key in act_keys
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
if PY2:
assert isinstance(act_keys, list)
else:
assert isinstance(act_keys, KeysView)
exp_keys = [item[0] for item in exp_items]
assert act_keys_list == exp_keys
assert act_keys_list2 == exp_keys
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_values(testcase, obj, exp_items):
"""
Test function for DictView.values()
"""
# The code to be tested
act_values = obj.values()
# Also test iterating through the result
act_values_list = list(act_values)
# Test that second iteration is possible
act_values_list2 = list(act_values)
if not PY2:
        # Test __contains__() of the returned view
for value in act_values_list:
assert value in act_values
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
if PY2:
assert isinstance(act_values, list)
else:
assert isinstance(act_values, ValuesView)
exp_values = [item[1] for item in exp_items]
assert act_values_list == exp_values
assert act_values_list2 == exp_values
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_items(testcase, obj, exp_items):
"""
Test function for DictView.items()
"""
# The code to be tested
act_items = obj.items()
# Also test iterating through the result
act_items_list = list(act_items)
# Test that second iteration is possible
act_items_list2 = list(act_items)
if not PY2:
        # Test __contains__() of the returned view
for item in act_items_list:
assert item in act_items
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
if PY2:
assert isinstance(act_items, list)
else:
assert isinstance(act_items, ItemsView)
assert act_items_list == exp_items
assert act_items_list2 == exp_items
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_iterkeys(testcase, obj, exp_items):
"""
Test function for DictView.iterkeys()
"""
if not DICT_SUPPORTS_ITER_VIEW:
pytest.skip("Test dictionary does not support iterkeys() method")
assert PY2
# The code to be tested
act_keys = []
for key in obj.iterkeys():
act_keys.append(key)
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
exp_keys = [item[0] for item in exp_items]
assert act_keys == exp_keys
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_itervalues(testcase, obj, exp_items):
"""
Test function for DictView.itervalues()
"""
if not DICT_SUPPORTS_ITER_VIEW:
pytest.skip("Test dictionary does not support itervalues() method")
assert PY2
# The code to be tested
act_values = []
for value in obj.itervalues():
act_values.append(value)
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
exp_values = [item[1] for item in exp_items]
assert act_values == exp_values
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_iteritems(testcase, obj, exp_items):
"""
    Test function for DictView.iteritems()
"""
if not DICT_SUPPORTS_ITER_VIEW:
pytest.skip("Test dictionary does not support iteritems() method")
assert PY2
# The code to be tested
act_items = []
for item in obj.iteritems():
act_items.append(item)
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
assert act_items == exp_items
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_viewkeys(testcase, obj, exp_items):
"""
Test function for DictView.viewkeys() (PY2 only)
"""
if not DICT_SUPPORTS_ITER_VIEW:
pytest.skip("Test dictionary does support viewkeys() method")
assert PY2
# The code to be tested
act_keys = obj.viewkeys()
# Also test iterating through the result
act_keys_list = list(act_keys)
# Test that second iteration is possible
act_keys_list2 = list(act_keys)
# Test __contains__() of the returned view
for key in act_keys_list:
assert key in act_keys
# Ensure that exceptions raised in the remainder of this function
# are not mistaken as expected exceptions
assert testcase.exp_exc_types is None
assert isinstance(act_keys, KeysView)
exp_keys = [item[0] for item in exp_items]
assert act_keys_list == exp_keys
assert act_keys_list2 == exp_keys
@pytest.mark.parametrize(
"desc, kwargs, exp_exc_types, exp_warn_types, condition",
TESTCASES_DICTVIEW_ITEMS)
@simplified_test_function
def test_DictView_viewvalues(testcase, obj, exp_items):
"""
Test function for DictView.viewvalues()
"""
if not DICT_SUPPORTS_ITER_VIEW:
pytest.skip("Test dictionary does not support viewvalues() method")
assert PY2
# The code to be tested
act_values = obj.viewvalues()
# Also test iterating through the result
act_values_list = list(act_values)
"""
This Python script calculates HST ACS WFC photometry.
The class can be used to run calacs for calibrating the raw data.
The class also contains a simple interface to SExtractor that
can be used to find the locations of stars in the image.
:depends: PyRAF
:author: <NAME>, for STScI
:history: 02/10/09 Initial Version
:version: 1.0
"""
import os
import pyraf
from pyraf import iraf as I
from pyraf.iraf import stsdas as S
__author__ = '<NAME>'
__version__ = "1.0"
class ACSPhotometry():
"""
A class to do ACS photometry.
"""
#Note: could be cleaned up by removing the unnecessary self attribute assignments.
def __init__(self, path, out):
#should be expanded to be usable from the command line
#for the time being, path and output are not used for anything
self.path = path
self.out = out
def omitPhotcorr(self, file):
"""
This function will change the PHOTCORR keyword value to OMIT in the header
of the given file. The file name can contain wildcards, i.e. multiple files can
be given in one command.
"""
I.hedit(file, fields='PHOTCORR', value='omit', verify='no', show='no')
def runCalACS(self, file):
"""
Runs calacs for the given file. Uses the default parameters.
"""
S.hst()
S.hst.acs()
S.hst.acs.calacs(file)
def gStat(self, file):
"""
Calculates statistics from the given image. The extension must be given
in the filename, e.g. file.fits[1]
Returns:
All statistics given by IRAF task gstat.
"""
stat = S.gstat(file, fields='doall', Stdout=1)
return stat
def hdiff(self, file1, file2):
"""
Compares headers from two files with hdiff.
:returns: The differences found.
"""
hdiff = S.hdiff(file1, file2, Stdout=1)
return hdiff
def queryKeywords(self, file, keywords):
"""
Queries keywords from header. Can be used to get e.g. exposure times
from multiple raw files with wildcard in file name.
:returns: Queried keywords
"""
self.file = file
self.keywords = keywords
keyw = I.hselect(self.file, self.keywords, 'yes', Stdout=1)
return keyw
def getBandpar(self, configuration):
"""
Returns the bandpass parameters (bandpar) for the given synphot
configuration string, e.g. 'acs,wfc1,f435w'.
"""
self.conf = configuration
#example configuration = 'acs,wfc1,f435w'
#load right packages, output should be supressed if possible
S.hst()
S.hst.synphot()
bands = S.hst.synphot.bandpar(self.conf, Stdout=1)
return bands
def calculateZeropoint(self, photflam):
"""
Calculates the (ST) zeropoint from a given PHOTFLAM header keyword value.
:returns: the zeropoint value
"""
import math
self.photflam = photflam
zp = -21.1 - 2.5 * math.log10(self.photflam)
return zp
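# Worked example (a sketch, not part of the original script; the PHOTFLAM value
# below is made up purely for illustration):
#   ph = ACSPhotometry('path', 'output')
#   zp = ph.calculateZeropoint(3.0e-19)
#   # zp = -21.1 - 2.5 * log10(3.0e-19) ~= 25.2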
def getSize(self, file):
"""
This function can be used to get the size of the image. It actually
returns the summary information found in the image header; however,
when parsed correctly the image size is available.
"""
self.file = file
head = I.imheader(self.file, Stdout=1)
return head
def doImcalc(self, input, output, operation):
"""
Simple interface for the IRAF task imcalc. Can be used for simple
arithmetic operations between images. This could be rewritten with
PyFits and NumPy to give a broader selection of operations (a sketch
follows this method).
"""
self.input = input
self.output = output
self.operation = operation
I.imcalc(self.input, self.output, self.operation)
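# A rough sketch of the PyFits/NumPy alternative mentioned in the docstring
# (illustrative only, not used by this script; file names are placeholders and
# only a simple two-image sum is shown):
#   import pyfits
#   data1 = pyfits.getdata('image1.fits')
#   data2 = pyfits.getdata('image2.fits')
#   pyfits.writeto('sum.fits', data1 + data2)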
def displayDS9(self, image, buffer):
"""
Displays the given image in DS9. The function requires that
DS9 is already running. In the future this should be changed.
"""
I.display(image, buffer)
def markStars(self, data, buffer):
"""
Can be used to overlay markers on a DS9 buffer. The data is taken
from a table containing x and y coordinates. Uses a fixed size for the
points.
"""
I.tvmark(buffer, data, mark='point', nx=0, ny=0, points=0.5, color=204)
def interactiveImexam(self, image, frame):
"""
Can be used to call the interactive IRAF task imexam for a given frame. DS9 must be
running and the right frame should be given.
"""
I.imexam(image, frame=frame)
def doPhotometryACS(self, file, inputcoords, apertures, zeropoint, bgsigma, skyann, skydann):
"""
This function can be used to do photometry on a given image. Input parameters
can be varied; however, the task assumes that photometry is done on an ACS
image and that the exposure time is found in the header of the file.
Object recentering is done with the centroid algorithm, and shifts of up to 6 pixels are
possible. For the sky-fitting algorithm, mode is adopted.
"""
#load packages, should supress the output
I.digiphot()
I.apphot()
#setting up for ACS
I.datapar(sigma=bgsigma, exposure='exptime', gain='ccdgain')
I.centerpars(calgorithm='centroid', cbox=10., maxshift=6.)
I.fitskypars(salgorithm='mode', annulus=skyann,\
dannulus=skydann, skyvalue=0.)
I.photpars(apertures=apertures, zmag=zeropoint)
I.phot(file, coords=inputcoords, verify='no', verbose='no')
def getMeaningfulColumns(self, input, columns, output):
"""
Uses IRAF task tdump for parsing columns from a table.
"""
I.tdump(input, colum=columns, datafil=output)
def mergeTables(self, merged, output):
"""
Can be used to merge multiple tables together.
"""
I.tmerge(merged, output, option='merge')
def changeColumn(self, table, column, name, fmt, unit):
"""
Changes the column name and format to ones that IRAF supports. The unit can also be
set. Verbose output has been suppressed.
"""
I.tchcol(table, column, name, newfmt=fmt, newunit=unit, verbose='no')
def calculateac05(self, input, column, operation, outputfmt):
"""
Can be used to calculate the ac05 (from 3 pix to 10 pix) aperture correction.
Uses IRAF task tcalc, and adds the calculated values as a new column at the
end of the table.
"""
I.tcalc(input, column, operation, colfmt=outputfmt)
def correctMagnitude(self, input, column, operation, outputfmt):
"""
Essentially the same as calculateac05 function.
"""
I.tcalc(input, column, operation, colfmt=outputfmt)
def useSextractor(input):
"""
Very simple interface for running SExtractor on a given image. Does not return
anything to the Python program. This function is superseded by the
'SexTractor' class written by <NAME> for the Nordic Optical Telescope.
"""
command = 'sex %s' % input
os.system(command)
if __name__ == '__main__':
import glob as G
#import Photometry as P
verbose = False
doAll = False
calibpath = '../../calib/'
log = open('output.log', 'w')
log.write('#This is outputlog of Photometry.py\n\n')
ph = ACSPhotometry('path', 'output')
#edits PHOTCORR keywords
if doAll: ph.omitPhotcorr('*raw.fits[0]')
#reduce raw files
if doAll:
for file in G.glob('*_asn.fit*'):
ph.runCalACS(file)
#get some statistics
stats = []
for file in G.glob('*_crj.fit*'):
stats.append(ph.gStat(file + '[1]'))
log.write('\n#Image Statistics:\n')
for line in stats:
for row in line:
log.write(row + '\n')
if verbose: print row
diff = ph.hdiff(calibpath + 'j8c0d1011_crj.fits[1]', 'j8c0d1011_crj.fits[1]')
log.write('\n#Header Difference:\n')
for line in diff:
log.write(line + '\n')
if verbose: print line
#lets check some header keywords
keys = '$l,filter*,exptime,photflam,photzpt'
phots = ph.queryKeywords(calibpath + '*drz.fits[1]', keys)
log.write('\n#Photometric keyword values\n')
for line in phots:
log.write(line + '\n')
if verbose: print line
tmp = line.split()
if tmp[1] == 'F606W':
expf606w = tmp[3]
photflamf606 = tmp[4]
if tmp[2] == 'F435W':
expf435w = tmp[3]
photflamf435 = tmp[4]
if tmp[2] == 'F814W':
expf814w = tmp[3]
photflamf814 = tmp[4]
#could be better with a dictionary?
info = [['F435W', float(expf435w) * 2., float(photflamf435), 'j8c0a1011_drz.fits[1]'],\
['F606W', float(expf606w) * 2., float(photflamf606), 'j8c0c1011_drz.fits[1]'],\
['F814W', float(expf814w) * 2., float(photflamf814), 'j8c0d1011_drz.fits[1]']]
#checks bandpar
log.write('\n#Bandpar values:\n')
for line in info:
bands = ph.getBandpar('acs,wfc1,' + line[0])
for l in bands:
log.write(l + '\n')
if verbose: print l
#calculates zeropoints
log.write('\n#Zeropoints:\n')
for line in info:
zp = ph.calculateZeropoint(line[2])
line.append(zp)
if verbose: print 'Zeropoint of %s is %8.5f' % (line[0], zp)
log.write('Zeropoint of %s is %8.5f\n' % (line[0], zp))
#check image sizes
log.write('\n#Image sizes:\n')
size = ph.getSize(calibpath + '*_drz.fits[1]')
for line in size:
if line.find('j8c0a1011_drz.fits') != -1:
sizea = line[-18:-9]
if line.find('j8c0c1011_drz.fits') != -1:
sizec = line[-18:-9]
if line.find('j8c0d1011_drz.fits') != -1:
sizeb = line[-18:-9]
#find the sizes:
dim = [sizea.split(','), sizeb.split(','), sizec.split(',')]
xdim = [float(d[0]) for d in dim]
ydim = [float(d[1]) for d in dim]
#Do the imcalc
if verbose: print 'Will make all the images of same size...'
if doAll:
for line in info:
ph.doImcalc(calibpath + line[3] + '[1:%s,1:%s]' % (str(int(min(xdim))), str(int(min(ydim)))),\
line[0] + '_drz.fits', 'im1*' + str(line[1]))
#make the colour image
if verbose: print 'Will combine three images...'
weight = info[0][2] / info[1][2]
if doAll:
ph.doImcalc('F435W_drz.fits,F606W_drz.fits,F814W_drz.fits', 'colour_drz.fits',\
'''"(%s*im1 + im2 + im3)/6."''' % str(weight))
#lets find the stars from the colour image with sextractor
if verbose: print 'Will use sextractor to find stars...'
useSextractor('colour_drz.fits')
#will finally do the photometry
#I will use fixed values at this point. These values were manually measured with imexam
bgsigma = 4.1 #this could be taken from sextractor, but probably gives wrong values for the drizzled image
skyann = 10.
skydann = 5.
coords = 'output.cat' #from sextractor
for line in info:
file = line[0] + '_drz.fits[1]'
ph.doPhotometryACS(file, coords, '3,10', line[4], bgsigma, skyann, skydann)
#lets get the columns we will need
for line in info:
input = line[0] + '_drz.fits1.mag.1'
columns = 'c7,8,15,29,30,31,38,39,40'
output = line[0] + '.phota'
ph.getMeaningfulColumns(input, columns, output)
#lets merge the
""" Broadly applicable NGS processing/analysis functionality """
import os
import re
import subprocess
import errno
from attmap import AttMapEcho
from yacman import load_yaml
from .exceptions import UnsupportedFiletypeException
from .utils import is_fastq, is_gzipped_fastq, is_sam_or_bam
class NGSTk(AttMapEcho):
"""
Class to hold functions to build command strings used during pipeline runs.
Object can be instantiated with a string of a path to a yaml `pipeline config file`.
Since NGSTk inherits from `AttMapEcho`, the passed config file and its elements
will be accessible through the NGSTk object as attributes under `config` (e.g.
`NGSTk.tools.java`). In case no `config_file` argument is passed, all commands will
be returned assuming the tool is in the user's $PATH.
:param str config_file: Path to pipeline yaml config file (optional).
:param pypiper.PipelineManager pm: A PipelineManager with which to associate this toolkit instance;
that is, essentially a source from which to grab paths to tools,
resources, etc.
:Example:
from pypiper.ngstk import NGSTk as tk
tk = NGSTk()
tk.samtools_index("sample.bam")
# returns: samtools index sample.bam
# Using a configuration file (custom executable location):
from pypiper.ngstk import NGSTk
tk = NGSTk("pipeline_config_file.yaml")
tk.samtools_index("sample.bam")
# returns: /home/.local/samtools/bin/samtools index sample.bam
"""
def __init__(self, config_file=None, pm=None):
# parse yaml into the project's attributes
# self.add_entries(**config)
super(NGSTk, self).__init__(
None if config_file is None else load_yaml(config_file))
# Keep a link to the pipeline manager, if one is provided.
# if None is provided, instantiate "tools" and "parameters" with empty AttMaps
# this allows the usage of the same code for a command with and without using a pipeline manager
if pm is not None:
self.pm = pm
if hasattr(pm.config, "tools"):
self.tools = self.pm.config.tools
else:
self.tools = AttMapEcho()
if hasattr(pm.config, "parameters"):
self.parameters = self.pm.config.parameters
else:
self.parameters = AttMapEcho()
else:
self.tools = AttMapEcho()
self.parameters = AttMapEcho()
# If pigz is available, use that. Otherwise, default to gzip.
if hasattr(self.pm, "cores") and self.pm.cores > 1 and self.check_command("pigz"):
self.ziptool_cmd = "pigz -f -p {}".format(self.pm.cores)
else:
self.ziptool_cmd = "gzip -f"
def _ensure_folders(self, *paths):
"""
Ensure that paths to folder(s) exist.
Some command-line tools will not attempt to create the folder(s) needed
for an output path to exist. They instead assume that the folders are already
present and will fail if that assumption does not hold.
:param Iterable[str] paths: Collection of paths for which to ensure the containing folder exists.
"""
for p in paths:
# Only provide assurance for absolute paths.
if not p or not os.path.isabs(p):
continue
# See if what we're assuring is file- or folder-like.
fpath, fname = os.path.split(p)
base, ext = os.path.splitext(fname)
# If there's no extension, ensure that we have the whole path.
# Otherwise, just ensure that we have path to file's folder.
self.make_dir(fpath if ext else p)
@property
def ziptool(self):
"""
Returns the command to use for compressing/decompressing.
:return str: Either 'gzip', or 'pigz' if it is installed and multiple cores are available
"""
return self.ziptool_cmd
def make_dir(self, path):
"""
Forge path to directory, creating intermediates as needed.
:param str path: Path to create.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def make_sure_path_exists(self, path):
""" Alias for make_dir """
self.make_dir(path)
# Borrowed from looper
def check_command(self, command):
"""
Check if command can be called.
"""
# Use `command` to see if command is callable, store exit code
code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
# If exit code is not 0, report which command failed and return False, else return True
if code != 0:
print("Command is not callable: {0}".format(command))
return False
else:
return True
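# Usage sketch (illustrative; the tool name is arbitrary):
#   tk = NGSTk()
#   tk.check_command("samtools")  # True if samtools is on $PATH, otherwise
#                                 # prints a message and returns False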
def get_file_size(self, filenames):
"""
Get the total size of all files in the (space-separated) string, in megabytes (MB).
:param str filenames: a space-separated string of filenames
"""
# use (1024 ** 3) for gigabytes
# equivalent to: stat -Lc '%s' filename
# If given a list, recurse through it.
if type(filenames) is list:
return sum([self.get_file_size(filename) for filename in filenames])
return round(sum([float(os.stat(f).st_size) for f in filenames.split(" ")]) / (1024 ** 2), 4)
def mark_duplicates(self, aligned_file, out_file, metrics_file, remove_duplicates="True"):
cmd = self.tools.java
if self.pm.javamem: # If a memory restriction exists.
cmd += " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " MarkDuplicates"
cmd += " INPUT=" + aligned_file
cmd += " OUTPUT=" + out_file
cmd += " METRICS_FILE=" + metrics_file
cmd += " REMOVE_DUPLICATES=" + remove_duplicates
return cmd
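# Example of the command this builder produces (a sketch; the java/picard paths
# come from the pipeline config and the file names are placeholders):
#   java -Xmx4g -jar picard.jar MarkDuplicates INPUT=sample.bam \
#       OUTPUT=sample.dedup.bam METRICS_FILE=sample.metrics REMOVE_DUPLICATES=True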
def bam2fastq(self, input_bam, output_fastq,
output_fastq2=None, unpaired_fastq=None):
"""
Create command to convert BAM(s) to FASTQ(s).
:param str input_bam: Path to sequencing reads file to convert
:param output_fastq: Path to FASTQ to write
:param output_fastq2: Path to (R2) FASTQ to write
:param unpaired_fastq: Path to unpaired FASTQ to write
:return str: Command to convert BAM(s) to FASTQ(s)
"""
self._ensure_folders(output_fastq, output_fastq2, unpaired_fastq)
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " INPUT={0}".format(input_bam)
cmd += " FASTQ={0}".format(output_fastq)
if output_fastq2 is not None and unpaired_fastq is not None:
cmd += " SECOND_END_FASTQ={0}".format(output_fastq2)
cmd += " UNPAIRED_FASTQ={0}".format(unpaired_fastq)
return cmd
def bam_to_fastq(self, bam_file, out_fastq_pre, paired_end):
"""
Build command to convert BAM file to FASTQ file(s) (R1/R2).
:param str bam_file: path to BAM file with sequencing reads
:param str out_fastq_pre: path prefix for output FASTQ file(s)
:param bool paired_end: whether the given file contains paired-end
or single-end sequencing reads
:return str: file conversion command, ready to run
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
cmd = self.tools.java + " -Xmx" + self.pm.javamem
cmd += " -jar " + self.tools.picard + " SamToFastq"
cmd += " I=" + bam_file
cmd += " F=" + out_fastq_pre + "_R1.fastq"
if paired_end:
cmd += " F2=" + out_fastq_pre + "_R2.fastq"
cmd += " INCLUDE_NON_PF_READS=true"
cmd += " QUIET=true"
cmd += " VERBOSITY=ERROR"
cmd += " VALIDATION_STRINGENCY=SILENT"
return cmd
def bam_to_fastq_awk(self, bam_file, out_fastq_pre, paired_end, zipmode=False):
"""
This converts a BAM file to FASTQ files using awk. As of 2016, this is much faster
than the standard way of doing this using Picard, and also much faster than the
bedtools implementation; however, it does no sanity checks and assumes the reads
(for paired data) are all paired (no singletons) and in the correct order.
:param bool zipmode: Should the output be zipped?
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
fq2 = out_fastq_pre + "_R2.fastq"
if zipmode:
fq1 = fq1 + ".gz"
fq2 = fq2 + ".gz"
fq1_target = " | \"" + self.ziptool + " -c > " + fq1 + '"'
fq2_target = " | \"" + self.ziptool + " -c > " + fq2 + '"'
else:
fq1_target = ' > "' + fq1 + '"'
fq2_target = ' > "' + fq2 + '"'
if paired_end:
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ if (NR%2==1) print "@"$1"/1\n"$10"\n+\n"$11' + fq1_target + ';'
cmd += r' else print "@"$1"/2\n"$10"\n+\n"$11' + fq2_target + '; }'
cmd += "'" # end the awk command
else:
fq2 = None
cmd = self.tools.samtools + " view " + bam_file + " | awk '"
cmd += r'{ print "@"$1"\n"$10"\n+\n"$11' + fq1_target + '; }'
cmd += "'"
return cmd, fq1, fq2
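# Shape of the generated paired-end command in non-zip mode (a sketch with
# placeholder paths; the samtools path comes from the config):
#   samtools view sample.bam | awk '{ if (NR%2==1) print "@"$1"/1\n"$10"\n+\n"$11 > "pre_R1.fastq";
#       else print "@"$1"/2\n"$10"\n+\n"$11 > "pre_R2.fastq"; }'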
def bam_to_fastq_bedtools(self, bam_file, out_fastq_pre, paired_end):
"""
Converts BAM to FASTQ; a version using bedtools.
"""
self.make_sure_path_exists(os.path.dirname(out_fastq_pre))
fq1 = out_fastq_pre + "_R1.fastq"
fq2 = None
cmd = self.tools.bedtools + " bamtofastq -i " + bam_file + " -fq " + fq1
if paired_end:
fq2 = out_fastq_pre + "_R2.fastq"
cmd += " -fq2 " + fq2
return cmd, fq1, fq2
def get_input_ext(self, input_file):
"""
Get the extension of the input_file. Assumes you're using either
.bam or .fastq/.fq or .fastq.gz/.fq.gz.
"""
if input_file.endswith(".bam"):
input_ext = ".bam"
elif input_file.endswith(".fastq.gz") or input_file.endswith(".fq.gz"):
input_ext = ".fastq.gz"
elif input_file.endswith(".fastq") or input_file.endswith(".fq"):
input_ext = ".fastq"
else:
errmsg = "'{}'; this pipeline can only deal with .bam, .fastq, " \
"or .fastq.gz files".format(input_file)
raise UnsupportedFiletypeException(errmsg)
return input_ext
def merge_or_link(self, input_args, raw_folder, local_base="sample"):
"""
Standardizes various input possibilities by converting either .bam,
.fastq, or .fastq.gz files into a local file; merging those
from __future__ import print_function, division, absolute_import
import numpy as np
from llvmlite.llvmpy.core import Type, Builder, ICMP_EQ, Constant
from numba import types, cgutils, compiler
from ..caching import make_library_cache, NullCache
def _build_ufunc_loop_body(load, store, context, func, builder, arrays, out,
offsets, store_offset, signature, pyapi, env):
elems = load()
# Compute
status, retval = context.call_conv.call_function(builder, func,
signature.return_type,
signature.args, elems)
# Store
with builder.if_else(status.is_ok, likely=True) as (if_ok, if_error):
with if_ok:
store(retval)
with if_error:
gil = pyapi.gil_ensure()
context.call_conv.raise_error(builder, pyapi, status)
pyapi.gil_release(gil)
# increment indices
for off, ary in zip(offsets, arrays):
builder.store(builder.add(builder.load(off), ary.step), off)
builder.store(builder.add(builder.load(store_offset), out.step),
store_offset)
return status.code
def _build_ufunc_loop_body_objmode(load, store, context, func, builder,
arrays, out, offsets, store_offset,
signature, env, pyapi):
elems = load()
# Compute
_objargs = [types.pyobject] * len(signature.args)
# We need to push the error indicator to avoid it messing with
# the ufunc's execution. We restore it unless the ufunc raised
# a new error.
with pyapi.err_push(keep_new=True):
status, retval = context.call_conv.call_function(builder, func,
types.pyobject,
_objargs, elems)
# Release owned reference to arguments
for elem in elems:
pyapi.decref(elem)
# NOTE: if an error occurred, it will be caught by the Numpy machinery
# Store
store(retval)
# increment indices
for off, ary in zip(offsets, arrays):
builder.store(builder.add(builder.load(off), ary.step), off)
builder.store(builder.add(builder.load(store_offset), out.step),
store_offset)
return status.code
def build_slow_loop_body(context, func, builder, arrays, out, offsets,
store_offset, signature, pyapi, env):
def load():
elems = [ary.load_direct(builder.load(off))
for off, ary in zip(offsets, arrays)]
return elems
def store(retval):
out.store_direct(retval, builder.load(store_offset))
return _build_ufunc_loop_body(load, store, context, func, builder, arrays,
out, offsets, store_offset, signature, pyapi,
env=env)
def build_obj_loop_body(context, func, builder, arrays, out, offsets,
store_offset, signature, pyapi, envptr, env):
env_body = context.get_env_body(builder, envptr)
env_manager = pyapi.get_env_manager(env, env_body, envptr)
def load():
# Load
elems = [ary.load_direct(builder.load(off))
for off, ary in zip(offsets, arrays)]
# Box
elems = [pyapi.from_native_value(t, v, env_manager)
for v, t in zip(elems, signature.args)]
return elems
def store(retval):
is_ok = cgutils.is_not_null(builder, retval)
# If an error is raised by the object mode ufunc, it will
# simply get caught by the Numpy ufunc machinery.
with builder.if_then(is_ok, likely=True):
# Unbox
native = pyapi.to_native_value(signature.return_type, retval)
assert native.cleanup is None
# Store
out.store_direct(native.value, builder.load(store_offset))
# Release owned reference
pyapi.decref(retval)
return _build_ufunc_loop_body_objmode(load, store, context, func, builder,
arrays, out, offsets, store_offset,
signature, envptr, pyapi)
def build_fast_loop_body(context, func, builder, arrays, out, offsets,
store_offset, signature, ind, pyapi, env):
def load():
elems = [ary.load_aligned(ind)
for ary in arrays]
return elems
def store(retval):
out.store_aligned(retval, ind)
return _build_ufunc_loop_body(load, store, context, func, builder, arrays,
out, offsets, store_offset, signature, pyapi,
env=env)
def build_ufunc_wrapper(library, context, fname, signature, objmode, cres):
"""
Wrap the scalar function with a loop that iterates over the arguments
"""
assert isinstance(fname, str)
byte_t = Type.int(8)
byte_ptr_t = Type.pointer(byte_t)
byte_ptr_ptr_t = Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
intp_ptr_t = Type.pointer(intp_t)
fnty = Type.function(Type.void(), [byte_ptr_ptr_t, intp_ptr_t,
intp_ptr_t, byte_ptr_t])
wrapperlib = context.codegen().create_library('ufunc_wrapper')
wrapper_module = wrapperlib.create_ir_module('')
if objmode:
func_type = context.call_conv.get_function_type(
types.pyobject, [types.pyobject] * len(signature.args))
else:
func_type = context.call_conv.get_function_type(
signature.return_type, signature.args)
func = wrapper_module.add_function(func_type, name=fname)
func.attributes.add("alwaysinline")
wrapper = wrapper_module.add_function(fnty, "__ufunc__." + func.name)
arg_args, arg_dims, arg_steps, arg_data = wrapper.args
arg_args.name = "args"
arg_dims.name = "dims"
arg_steps.name = "steps"
arg_data.name = "data"
builder = Builder(wrapper.append_basic_block("entry"))
# Prepare Environment
envname = context.get_env_name(cres.fndesc)
env = cres.environment
envptr = builder.load(context.declare_env_global(builder.module, envname))
# Emit loop
loopcount = builder.load(arg_dims, name="loopcount")
# Prepare inputs
arrays = []
for i, typ in enumerate(signature.args):
arrays.append(UArrayArg(context, builder, arg_args, arg_steps, i, typ))
# Prepare output
out = UArrayArg(context, builder, arg_args, arg_steps, len(arrays),
signature.return_type)
# Setup indices
offsets = []
zero = context.get_constant(types.intp, 0)
for _ in arrays:
p = cgutils.alloca_once(builder, intp_t)
offsets.append(p)
builder.store(zero, p)
store_offset = cgutils.alloca_once(builder, intp_t)
builder.store(zero, store_offset)
unit_strided = cgutils.true_bit
for ary in arrays:
unit_strided = builder.and_(unit_strided, ary.is_unit_strided)
pyapi = context.get_python_api(builder)
if objmode:
# General loop
gil = pyapi.gil_ensure()
with cgutils.for_range(builder, loopcount, intp=intp_t):
slowloop = build_obj_loop_body(context, func, builder,
arrays, out, offsets,
store_offset, signature,
pyapi, envptr, env)
pyapi.gil_release(gil)
builder.ret_void()
else:
with builder.if_else(unit_strided) as (is_unit_strided, is_strided):
with is_unit_strided:
with cgutils.for_range(builder, loopcount, intp=intp_t) as loop:
fastloop = build_fast_loop_body(context, func, builder,
arrays, out, offsets,
store_offset, signature,
loop.index, pyapi,
env=envptr)
with is_strided:
# General loop
with cgutils.for_range(builder, loopcount, intp=intp_t):
slowloop = build_slow_loop_body(context, func, builder,
arrays, out, offsets,
store_offset, signature,
pyapi, env=envptr)
builder.ret_void()
del builder
# Link and finalize
wrapperlib.add_ir_module(wrapper_module)
wrapperlib.add_linking_library(library)
return wrapperlib.get_pointer_to_function(wrapper.name)
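# For context (an illustrative sketch, not part of this module): wrappers like the
# one built above are what ultimately back numba's user-facing ufunc API, e.g.
#   from numba import vectorize
#   @vectorize(['float64(float64, float64)'])
#   def add(a, b):
#       return a + b
#   # add(np.arange(3.0), np.arange(3.0)) -> array([0., 2., 4.])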
class UArrayArg(object):
def __init__(self, context, builder, args, steps, i, fe_type):
self.context = context
self.builder = builder
self.fe_type = fe_type
offset = self.context.get_constant(types.intp, i)
offseted_args = self.builder.load(builder.gep(args, [offset]))
data_type = context.get_data_type(fe_type)
self.dataptr = self.builder.bitcast(offseted_args,
data_type.as_pointer())
sizeof = self.context.get_abi_sizeof(data_type)
self.abisize = self.context.get_constant(types.intp, sizeof)
offseted_step = self.builder.gep(steps, [offset])
self.step = self.builder.load(offseted_step)
self.is_unit_strided = builder.icmp(ICMP_EQ, self.abisize, self.step)
self.builder = builder
def load_direct(self, byteoffset):
"""
Generic load from the given *byteoffset*. load_aligned() is
preferred if possible.
"""
ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
return self.context.unpack_value(self.builder, self.fe_type, ptr)
def load_aligned(self, ind):
# Using gep() instead of explicit pointer addition helps LLVM
# vectorize the loop.
ptr = self.builder.gep(self.dataptr, [ind])
return self.context.unpack_value(self.builder, self.fe_type, ptr)
def store_direct(self, value, byteoffset):
ptr = cgutils.pointer_add(self.builder, self.dataptr, byteoffset)
self.context.pack_value(self.builder, self.fe_type, value, ptr)
def store_aligned(self, value, ind):
ptr = self.builder.gep(self.dataptr, [ind])
self.context.pack_value(self.builder, self.fe_type, value, ptr)
GufWrapperCache = make_library_cache('guf')
class _GufuncWrapper(object):
def __init__(self, py_func, cres, sin, sout, cache):
self.py_func = py_func
self.cres = cres
self.sin = sin
self.sout = sout
self.is_objectmode = self.signature.return_type == types.pyobject
self.cache = (GufWrapperCache(py_func=self.py_func)
if cache else NullCache())
@property
def library(self):
return self.cres.library
@property
def context(self):
return self.cres.target_context
@property
def call_conv(self):
return self.context.call_conv
@property
def signature(self):
return self.cres.signature
@property
def fndesc(self):
return self.cres.fndesc
@property
def env(self):
return self.cres.environment
def _build_wrapper(self, library, name):
"""
The LLVM IRBuilder code to create the gufunc wrapper.
The *library* arg is the CodeLibrary for which the wrapper should
be added to. The *name* arg is the name of the wrapper function being
created.
"""
byte_t = Type.int(8)
byte_ptr_t = Type.pointer(byte_t)
byte_ptr_ptr_t = Type.pointer(byte_ptr_t)
intp_t = self.context.get_value_type(types.intp)
intp_ptr_t = Type.pointer(intp_t)
fnty = Type.function(Type.void(), [byte_ptr_ptr_t, intp_ptr_t,
intp_ptr_t, byte_ptr_t])
wrapper_module = library.create_ir_module('_gufunc_wrapper')
func_type = self.call_conv.get_function_type(self.fndesc.restype,
self.fndesc.argtypes)
fname = self.fndesc.llvm_func_name
func = wrapper_module.add_function(func_type, name=fname)
func.attributes.add("alwaysinline")
wrapper = wrapper_module.add_function(fnty, name)
arg_args, arg_dims, arg_steps, arg_data = wrapper.args
arg_args.name = "args"
arg_dims.name = "dims"
arg_steps.name = "steps"
arg_data.name = "data"
builder = Builder(wrapper.append_basic_block("entry"))
loopcount = builder.load(arg_dims, name="loopcount")
pyapi = self.context.get_python_api(builder)
# Unpack shapes
unique_syms = set()
for grp in (self.sin, self.sout):
for syms in grp:
unique_syms |= set(syms)
sym_map = {}
for syms in self.sin:
for s in syms:
if s not in sym_map:
sym_map[s] = len(sym_map)
sym_dim = {}
for s, i in sym_map.items():
sym_dim[s] = builder.load(builder.gep(arg_dims,
[self.context.get_constant(
types.intp,
i + 1)]))
# Prepare inputs
arrays = []
step_offset = len(self.sin) + len(self.sout)
for i, (typ, sym) in enumerate(zip(self.signature.args,
self.sin + self.sout)):
ary = GUArrayArg(self.context, builder, arg_args,
arg_steps, i, step_offset, typ, sym, sym_dim)
step_offset += len(sym)
arrays.append(ary)
bbreturn = builder.append_basic_block('.return')
# Prologue
self.gen_prologue(builder, pyapi)
# Loop
with cgutils.for_range(builder, loopcount, intp=intp_t) as loop:
args = [a.get_array_at_offset(loop.index) for a in arrays]
innercall, error = self.gen_loop_body(builder, pyapi, func, args)
# If error, escape
cgutils.cbranch_or_continue(builder, error, bbreturn)
builder.branch(bbreturn)
builder.position_at_end(bbreturn)
# Epilogue
self.gen_epilogue(builder, pyapi)
builder.ret_void()
# Link
library.add_ir_module(wrapper_module)
library.add_linking_library(self.library)
def build(self):
# Use cache and compiler in a critical section
with compiler.lock_compiler:
wrapperlib = self.cache.load_overload(self.cres.signature, self.cres.target_context)
wrapper_name = "__gufunc__." + self.fndesc.mangled_name
if wrapperlib is None:
# Create library and enable caching
wrapperlib = self.context.codegen().create_library(str(self))
wrapperlib.enable_object_caching()
# Build wrapper
self._build_wrapper(wrapperlib, wrapper_name)
# Cache
self.cache.save_overload(self.cres.signature, wrapperlib)
# Finalize and get function pointer
ptr = wrapperlib.get_pointer_to_function(wrapper_name)
return ptr, self.env, wrapper_name
def gen_loop_body(self, builder, pyapi, func, args):
status, retval = self.call_conv.call_function(
builder, func, self.signature.return_type, self.signature.args,
args)
with builder.if_then(status.is_error, likely=False):
gil = pyapi.gil_ensure()
self.context.call_conv.raise_error(builder, pyapi, status)
pyapi.gil_release(gil)
return status.code, status.is_error
def gen_prologue(self, builder, pyapi):
pass # Do nothing
def gen_epilogue(self, builder, pyapi):
pass # Do nothing
class _GufuncObjectWrapper(_GufuncWrapper):
def gen_loop_body(self, builder, pyapi, func, args):
innercall, error = _prepare_call_to_object_mode(self.context,
builder, pyapi, func,
self.signature,
args)
return innercall, error
def gen_prologue(self, builder, pyapi):
# Acquire the GIL
self.gil = pyapi.gil_ensure()
def gen_epilogue(self, builder, pyapi):
# Release GIL
pyapi.gil_release(self.gil)
def build_gufunc_wrapper(py_func, cres, sin, sout, cache):
signature = cres.signature
wrapcls = (_GufuncObjectWrapper
if signature.return_type == types.pyobject
else _GufuncWrapper)
return wrapcls(py_func, cres, sin, sout, cache).build()
def _prepare_call_to_object_mode(context, builder, pyapi, func,
signature, args):
mod = builder.module
bb_core_return = builder.append_basic_block('ufunc.core.return')
# Call to
# PyObject* ndarray_new(int nd,
# npy_intp *dims, /* shape
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import warnings
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.wcs.utils import skycoord_to_pixel
from astropy.utils.exceptions import AstropyUserWarning
from .core import (SkyAperture, PixelAperture, _sanitize_pixel_positions,
_make_annulus_path, _get_phot_extents, _calc_aperture_var)
from ..utils.wcs_helpers import (skycoord_to_pixel_scale_angle, assert_angle,
assert_angle_or_pixel)
skycoord_to_pixel_mode = 'all'
__all__ = ['SkyEllipticalAperture', 'EllipticalAperture',
'SkyEllipticalAnnulus', 'EllipticalAnnulus']
def do_elliptical_photometry(data, positions, a, b, theta, error,
pixelwise_error, method, subpixels, a_in=None):
extents = np.zeros((len(positions), 4), dtype=int)
# TODO: we can be more efficient in terms of bounding box
radius = max(a, b)
extents[:, 0] = positions[:, 0] - radius + 0.5
extents[:, 1] = positions[:, 0] + radius + 1.5
extents[:, 2] = positions[:, 1] - radius + 0.5
extents[:, 3] = positions[:, 1] + radius + 1.5
ood_filter, extent, phot_extent = _get_phot_extents(data, positions,
extents)
flux = u.Quantity(np.zeros(len(positions), dtype=np.float), unit=data.unit)
if error is not None:
fluxvar = u.Quantity(np.zeros(len(positions), dtype=np.float),
unit=error.unit ** 2)
# TODO: flag these objects
if np.sum(ood_filter):
flux[ood_filter] = np.nan
warnings.warn("The aperture at position {0} does not have any "
"overlap with the data"
.format(positions[ood_filter]),
AstropyUserWarning)
if np.sum(ood_filter) == len(positions):
return flux
x_min, x_max, y_min, y_max = extent
x_pmin, x_pmax, y_pmin, y_pmax = phot_extent
if method == 'center':
use_exact = 0
subpixels = 1
elif method == 'subpixel':
use_exact = 0
else:
use_exact = 1
subpixels = 1
from ..geometry import elliptical_overlap_grid
for i in range(len(flux)):
if not np.isnan(flux[i]):
fraction = elliptical_overlap_grid(x_pmin[i], x_pmax[i],
y_pmin[i], y_pmax[i],
x_max[i] - x_min[i],
y_max[i] - y_min[i],
a, b, theta, use_exact,
subpixels)
if a_in is not None:
b_in = a_in * b / a
fraction -= elliptical_overlap_grid(x_pmin[i], x_pmax[i],
y_pmin[i], y_pmax[i],
x_max[i] - x_min[i],
y_max[i] - y_min[i],
a_in, b_in, theta,
use_exact, subpixels)
flux[i] = np.sum(data[y_min[i]:y_max[i],
x_min[i]:x_max[i]] * fraction)
if error is not None:
fluxvar[i] = _calc_aperture_var(
data, fraction, error, flux[i], x_min[i], x_max[i],
y_min[i], y_max[i], pixelwise_error)
if error is None:
return flux
else:
return flux, np.sqrt(fluxvar)
def get_elliptical_fractions(data, positions, a, b, theta,
method, subpixels, a_in=None):
extents = np.zeros((len(positions), 4), dtype=int)
# TODO: we can be more efficient in terms of bounding box
radius = max(a, b)
extents[:, 0] = positions[:, 0] - radius + 0.5
extents[:, 1] = positions[:, 0] + radius + 1.5
extents[:, 2] = positions[:, 1] - radius + 0.5
extents[:, 3] = positions[:, 1] + radius + 1.5
ood_filter, extent, phot_extent = _get_phot_extents(data, positions,
extents)
fractions = np.zeros((data.shape[0], data.shape[1], len(positions)),
dtype=np.float)
# TODO: flag these objects
if np.sum(ood_filter):
warnings.warn("The aperture at position {0} does not have any "
"overlap with the data"
.format(positions[ood_filter]),
AstropyUserWarning)
if np.sum(ood_filter) == len(positions):
return np.squeeze(fractions)
x_min, x_max, y_min, y_max = extent
x_pmin, x_pmax, y_pmin, y_pmax = phot_extent
if method == 'center':
use_exact = 0
subpixels = 1
elif method == 'subpixel':
use_exact = 0
else:
use_exact = 1
subpixels = 1
from ..geometry import elliptical_overlap_grid
for i in range(len(positions)):
if ood_filter[i] is not True:
fractions[y_min[i]: y_max[i], x_min[i]: x_max[i], i] = \
elliptical_overlap_grid(x_pmin[i], x_pmax[i],
y_pmin[i], y_pmax[i],
x_max[i] - x_min[i],
y_max[i] - y_min[i],
a, b, theta, use_exact,
subpixels)
if a_in is not None:
b_in = a_in * b / a
fractions[y_min[i]: y_max[i], x_min[i]: x_max[i], i] -= \
elliptical_overlap_grid(x_pmin[i], x_pmax[i],
y_pmin[i], y_pmax[i],
x_max[i] - x_min[i],
y_max[i] - y_min[i],
a_in, b_in, theta,
use_exact, subpixels)
return np.squeeze(fractions)
class SkyEllipticalAperture(SkyAperture):
"""
Elliptical aperture(s), defined in sky coordinates.
Parameters
----------
positions : `~astropy.coordinates.SkyCoord`
Celestial coordinates of the aperture center(s). This can be either
scalar coordinates or an array of coordinates.
a : `~astropy.units.Quantity`
The semimajor axis, either in angular or pixel units.
b : `~astropy.units.Quantity`
The semiminor axis, either in angular or pixel units.
theta : `~astropy.units.Quantity`
The position angle of the semimajor axis (counterclockwise), either
in angular or pixel units.
"""
def __init__(self, positions, a, b, theta):
if isinstance(positions, SkyCoord):
self.positions = positions
else:
raise TypeError("positions should be a SkyCoord instance")
assert_angle_or_pixel('a', a)
assert_angle_or_pixel('b', b)
assert_angle('theta', theta)
if a.unit.physical_type != b.unit.physical_type:
raise ValueError("a and b should either both be angles "
"or in pixels")
self.a = a
self.b = b
self.theta = theta
def to_pixel(self, wcs):
"""
Return a EllipticalAperture instance in pixel coordinates.
"""
x, y = skycoord_to_pixel(self.positions, wcs,
mode=skycoord_to_pixel_mode)
central_pos = SkyCoord([wcs.wcs.crval], frame=self.positions.name,
unit=wcs.wcs.cunit)
xc, yc, scale, angle = skycoord_to_pixel_scale_angle(central_pos, wcs)
if self.a.unit.physical_type == 'angle':
a = (scale * self.a).to(u.pixel).value
b = (scale * self.b).to(u.pixel).value
else: # pixel
a = self.a.value
b = self.b.value
theta = (angle + self.theta).to(u.radian).value
pixel_positions = np.array([x, y]).transpose()
return EllipticalAperture(pixel_positions, a, b, theta)
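# Usage sketch (illustrative; the coordinates, axis sizes and WCS are placeholders):
#   from astropy.coordinates import SkyCoord
#   import astropy.units as u
#   pos = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
#   sky_aper = SkyEllipticalAperture(pos, a=1.5 * u.arcsec, b=0.9 * u.arcsec,
#                                    theta=45. * u.deg)
#   pix_aper = sky_aper.to_pixel(wcs)  # 'wcs' is an astropy.wcs.WCS instance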
class EllipticalAperture(PixelAperture):
"""
Elliptical aperture(s), defined in pixel coordinates.
Parameters
----------
positions : tuple, list, array, or `~astropy.units.Quantity`
Pixel coordinates of the aperture center(s), either as a single
``(x, y)`` tuple, a list of ``(x, y)`` tuples, an ``Nx2`` or
``2xN`` `~numpy.ndarray`, or an ``Nx2`` or ``2xN``
`~astropy.units.Quantity` in units of pixels. A ``2x2``
`~numpy.ndarray` or `~astropy.units.Quantity` is interpreted as
``Nx2``, i.e. two rows of (x, y) coordinates.
a : float
The semimajor axis.
b : float
The semiminor axis.
theta : float
The position angle of the semimajor axis in radians
(counterclockwise).
Raises
------
ValueError : `ValueError`
If either axis (``a`` or ``b``) is negative.
"""
def __init__(self, positions, a, b, theta):
try:
self.a = float(a)
self.b = float(b)
self.theta = float(theta)
except TypeError:
raise TypeError("'a' and 'b' and 'theta' must be numeric, "
"received {0} and {1} and {2}."
.format((type(a), type(b), type(theta))))
if a < 0 or b < 0:
raise ValueError("'a' and 'b' must be non-negative.")
self.positions = _sanitize_pixel_positions(positions)
def area(self):
return math.pi * self.a * self.b
def do_photometry(self, data, error=None, pixelwise_error=True,
method='exact', subpixels=5):
if method not in ('center', 'subpixel', 'exact'):
raise ValueError('{0} method not supported for aperture class '
'{1}'.format(method, self.__class__.__name__))
flux = do_elliptical_photometry(data, self.positions,
self.a, self.b, self.theta,
error=error,
pixelwise_error=pixelwise_error,
method=method,
subpixels=subpixels)
return flux
def get_fractions(self, data, method='exact', subpixels=5):
if method not in ('center', 'subpixel', 'exact'):
raise ValueError('{0} method not supported for aperture class '
'{1}'.format(method, self.__class__.__name__))
fractions = get_elliptical_fractions(data, self.positions,
self.a, self.b, self.theta,
method=method,
subpixels=subpixels)
return fractions
def plot(self, origin=(0, 0), source_id=None, ax=None, fill=False,
**kwargs):
import matplotlib.patches as mpatches
plot_positions, ax, kwargs = self._prepare_plot(
origin, source_id, ax, fill, **kwargs)
theta_deg = self.theta * 180. / np.pi
for position in plot_positions:
patch = mpatches.Ellipse(position, 2.*self.a, 2.*self.b,
theta_deg, **kwargs)
ax.add_patch(patch)
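# Usage sketch for the pixel-space aperture (illustrative values; note that
# do_photometry above expects `data` to carry astropy units):
#   import numpy as np
#   import astropy.units as u
#   data = np.ones((100, 100)) * u.adu
#   aper = EllipticalAperture([(50., 50.)], a=5., b=3., theta=np.pi / 4.)
#   flux = aper.do_photometry(data, method='exact')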
class SkyEllipticalAnnulus(SkyAperture):
"""
Elliptical annulus aperture(s), defined in sky coordinates.
Parameters
----------
positions : `~astropy.coordinates.SkyCoord`
Celestial coordinates of the aperture center(s). This can be either
scalar coordinates or an array of coordinates.
a_in : `~astropy.units.Quantity`
The inner semimajor axis, either in angular or pixel units.
a_out : `~astropy.units.Quantity`
The outer semimajor axis, either in angular or pixel units.
b_out : `~astropy.units.Quantity`
The outer semiminor axis, either in angular or pixel units. The inner
semiminor axis is determined by scaling by a_in/a_out.
theta : `~astropy.units.Quantity`
The position angle of the semimajor axis (counterclockwise), either
in angular or pixel units.
"""
def __init__(self, positions, a_in, a_out, b_out, theta):
if isinstance(positions, SkyCoord):
self.positions = positions
else:
raise TypeError("positions should be a SkyCoord instance")
assert_angle_or_pixel('a_in', a_in)
assert_angle_or_pixel('a_out', a_out)
assert_angle_or_pixel('b_out', b_out)
assert_angle('theta', theta)
if a_in.unit.physical_type != a_out.unit.physical_type:
raise ValueError("a_in and a_out should either both be angles "
"or in pixels")
if a_out.unit.physical_type != b_out.unit.physical_type:
raise ValueError("a_out and b_out should either both be angles "
"or in pixels")
self.a_in = a_in
self.a_out = a_out
self.b_out = b_out
self.theta = theta
def to_pixel(self, wcs):
"""
Return a EllipticalAnnulus instance in pixel coordinates.
"""
x, y = skycoord_to_pixel(self.positions, wcs,
mode=skycoord_to_pixel_mode)
central_pos = SkyCoord([wcs.wcs.crval], frame=self.positions.name,
unit=wcs.wcs.cunit)
xc, yc, scale, angle = skycoord_to_pixel_scale_angle(central_pos, wcs)
if self.a_in.unit.physical_type == 'angle':
a_in = (scale * self.a_in).to(u.pixel).value
a_out = (scale * self.a_out).to(u.pixel).value
b_out = (scale * self.b_out).to(u.pixel).value
else:
a_in = self.a_in.value
a_out = self.a_out.value
b_out = self.b_out.value
theta = (angle + self.theta).to(u.radian).value
pixel_positions = np.array([x, y]).transpose()
return EllipticalAnnulus(pixel_positions, a_in, a_out, b_out, theta)
class EllipticalAnnulus(PixelAperture):
"""
Elliptical annulus aperture(s), defined in pixel coordinates.
Parameters
----------
positions : tuple, list, array, or `~astropy.units.Quantity`
Pixel coordinates of the aperture center(s), either as a single
``(x, y)`` tuple, a list of ``(x, y)`` tuples, an ``Nx2`` or
``2xN`` `~numpy.ndarray`, or
import os
import sys
import time
import json
import yaml
import socket
import string
import netaddr
import threading
import constants as const
import subprocess32 as subprocess
from node import Node
from rest import RestLib
from bridge import Bridge
from threading import Lock
from membership_rule import MembershipRule
class Helper(object):
# lock to serialize stdout of different threads
__print_lock = Lock()
@staticmethod
def get_setup_node_ip():
"""
Get the setup node's primary outbound IP address (e.g. its eth0 ip)
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('bigswitch.com', 0))
return s.getsockname()[0]
@staticmethod
def run_command_on_local_without_timeout(command):
output, error = subprocess.Popen(command,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
shell=True).communicate()
return output, error
@staticmethod
def run_command_on_remote_with_key_without_timeout(node_ip, command):
"""
Run cmd on remote node.
"""
local_cmd = (r'''ssh -t -oStrictHostKeyChecking=no -o LogLevel=quiet %(hostname)s "%(remote_cmd)s"''' %
{'hostname' : node_ip,
'remote_cmd' : command,
})
return Helper.run_command_on_local_without_timeout(local_cmd)
@staticmethod
def run_command_on_local(command, timeout=1800):
"""
Use subprocess to run a shell command on local node.
"""
def target(process):
process.communicate()
try:
p = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True, bufsize=1)
except Exception as e:
msg = "Error opening process %s: %s\n" % (command, e)
Helper.safe_print(msg)
return
thread = threading.Thread(target=target, args=(p,))
thread.start()
thread.join(timeout)
if thread.is_alive():
p.terminate()
thread.join()
msg = "Timed out waiting for command %s to finish." % command
Helper.safe_print(msg)
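# Usage sketch (illustrative): the timeout is enforced by running communicate()
# in a worker thread and terminating the process if the thread is still alive
# when the timeout expires, e.g.
#   Helper.run_command_on_local("sleep 600", timeout=5)  # terminated after ~5s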
@staticmethod
def safe_print(message):
"""
Grab the lock and print to stdout.
The lock is to serialize messages from
different threads. 'stty sane' is to
clean up any hidden terminal characters.
"""
with Helper.__print_lock:
subprocess.call('stty sane', shell=True)
sys.stdout.write(message)
sys.stdout.flush()
subprocess.call('stty sane', shell=True)
@staticmethod
def run_command_on_remote_with_passwd(node, command):
"""
Run cmd on remote node.
"""
local_cmd = (r'''sshpass -p %(pwd)s ssh -t -oStrictHostKeyChecking=no -o LogLevel=quiet %(user)s@%(hostname)s >> %(log)s 2>&1 "echo %(pwd)s | sudo -S %(remote_cmd)s"''' %
{'user' : node.user,
'hostname' : node.hostname,
'pwd' : node.passwd,
'log' : node.log,
'remote_cmd' : command,
})
Helper.run_command_on_local(local_cmd)
@staticmethod
def run_command_on_remote_with_passwd_without_timeout(hostname, user, passwd, command):
local_cmd = (r'''sshpass -p %(pwd)s ssh -t -oStrictHostKeyChecking=no -o LogLevel=quiet %(user)s@%(hostname)s "echo %(pwd)s | sudo -S %(remote_cmd)s"''' %
{'user' : user,
'hostname' : hostname,
'pwd' : passwd,
'log' : const.LOG_FILE,
'remote_cmd' : command,
})
return Helper.run_command_on_local_without_timeout(local_cmd)
@staticmethod
def copy_file_to_remote_with_passwd(node, src_file, dst_dir, dst_file, mode=777):
"""
Copy file from local node to remote node,
create directory if remote directory doesn't exist,
change the file mode as well.
"""
mkdir_cmd = (r'''mkdir -p %(dst_dir)s''' % {'dst_dir' : dst_dir})
Helper.run_command_on_remote_with_passwd(node, mkdir_cmd)
scp_cmd = (r'''sshpass -p %(pwd)s scp -oStrictHostKeyChecking=no -o LogLevel=quiet -r %(src_file)s %(user)s@%(hostname)s:%(dst_dir)s/%(dst_file)s >> %(log)s 2>&1''' %
{'user' : node.user,
'hostname' : node.hostname,
'pwd' : node.passwd,
'log' : node.log,
'src_file' : src_file,
'dst_dir' : dst_dir,
'dst_file' : dst_file
})
Helper.run_command_on_local(scp_cmd)
chmod_cmd = (r'''chmod -R %(mode)d %(dst_dir)s/%(dst_file)s''' %
{'mode' : mode,
'dst_dir' : dst_dir,
'dst_file' : dst_file
})
Helper.run_command_on_remote_with_passwd(node, chmod_cmd)
@staticmethod
def copy_file_from_remote_with_passwd(node, src_dir, src_file, dst_dir, mode=777):
"""
Copy file from remote node to local node,
create directory if local directory doesn't exist,
change the file mode as well.
"""
mkdir_cmd = (r'''mkdir -p %(dst_dir)s''' % {'dst_dir' : dst_dir})
Helper.run_command_on_local(mkdir_cmd)
scp_cmd = (r'''sshpass -p %(pwd)s scp -oStrictHostKeyChecking=no -o LogLevel=quiet %(user)s@%(hostname)s:%(src_dir)s/%(src_file)s %(dst_dir)s/%(src_file)s >> %(log)s 2>&1''' %
'pwd' : node.passwd,
'user' : node.user,
'hostname' : node.hostname,
'log' : node.log,
'src_dir' : src_dir,
'dst_dir' : dst_dir,
'src_file' : src_file
})
Helper.run_command_on_local(scp_cmd)
chmod_cmd = (r'''chmod -R %(mode)d %(dst_dir)s/%(src_file)s''' %
{'mode' : mode,
'dst_dir' : dst_dir,
'src_file' : src_file
})
Helper.run_command_on_local(chmod_cmd)
@staticmethod
def run_command_on_remote_with_key(node, command):
"""
Run cmd on remote node.
"""
local_cmd = (r'''ssh -t -oStrictHostKeyChecking=no -o LogLevel=quiet %(hostname)s >> %(log)s 2>&1 "%(remote_cmd)s"''' %
{'hostname' : node.hostname,
'log' : node.log,
'remote_cmd' : command
})
Helper.run_command_on_local(local_cmd)
@staticmethod
def copy_file_to_remote_with_key(node, src_file, dst_dir, dst_file, mode=777):
"""
Copy file from local node to remote node,
create directory if remote directory doesn't exist,
change the file mode as well.
"""
mkdir_cmd = (r'''mkdir -p %(dst_dir)s''' % {'dst_dir' : dst_dir})
Helper.run_command_on_remote_with_key(node, mkdir_cmd)
scp_cmd = (r'''scp -oStrictHostKeyChecking=no -o LogLevel=quiet -r %(src_file)s %(hostname)s:%(dst_dir)s/%(dst_file)s >> %(log)s 2>&1''' %
{'hostname' : node.hostname,
'log' : node.log,
'src_file' : src_file,
'dst_dir' : dst_dir,
'dst_file' : dst_file
})
Helper.run_command_on_local(scp_cmd)
chmod_cmd = (r'''chmod -R %(mode)d %(dst_dir)s/%(dst_file)s''' %
{'mode' : mode,
'dst_dir' : dst_dir,
'dst_file' : dst_file
})
Helper.run_command_on_remote_with_key(node, chmod_cmd)
@staticmethod
def copy_file_from_remote_with_key(node, src_dir, src_file, dst_dir, mode=777):
"""
Copy file from remote node to local node,
create directory if local directory doesn't exist,
change the file mode as well.
"""
mkdir_cmd = (r'''mkdir -p %(dst_dir)s''' % {'dst_dir' : dst_dir})
Helper.run_command_on_local(mkdir_cmd)
scp_cmd = (r'''scp -oStrictHostKeyChecking=no -o LogLevel=quiet %(hostname)s:%(src_dir)s/%(src_file)s %(dst_dir)s/%(src_file)s >> %(log)s 2>&1''' %
{'hostname' : node.hostname,
'log' : node.log,
'src_dir' : src_dir,
'dst_dir' : dst_dir,
'src_file' : src_file
})
Helper.run_command_on_local(scp_cmd)
chmod_cmd = (r'''chmod -R %(mode)d %(dst_dir)s/%(src_file)s''' %
{'mode' : mode,
'dst_dir' : dst_dir,
'src_file' : src_file
})
Helper.run_command_on_local(chmod_cmd)
@staticmethod
def generate_scripts_for_ubuntu(node):
# generate bash script
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(bash_template_dir)s/%(bash_template)s_%(os_version)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'bash_template_dir' : const.BASH_TEMPLATE_DIR,
'bash_template' : const.UBUNTU,
'os_version' : node.os_version}), "r") as bash_template_file:
bash_template = bash_template_file.read()
is_controller = False
if node.role == const.ROLE_NEUTRON_SERVER:
is_controller = True
bash = (bash_template %
{'install_ivs' : str(node.install_ivs).lower(),
'install_bsnstacklib' : str(node.install_bsnstacklib).lower(),
'install_all' : str(node.install_all).lower(),
'deploy_dhcp_agent' : str(node.deploy_dhcp_agent).lower(),
'is_controller' : str(is_controller).lower(),
'deploy_horizon_patch': str(node.deploy_horizon_patch).lower(),
'ivs_version' : node.ivs_version,
'bsnstacklib_version' : node.bsnstacklib_version,
'dst_dir' : node.dst_dir,
'hostname' : node.hostname,
'ivs_pkg' : node.ivs_pkg,
'horizon_patch' : node.horizon_patch,
'horizon_patch_dir' : node.horizon_patch_dir,
'horizon_base_dir' : node.horizon_base_dir,
'ivs_debug_pkg' : node.ivs_debug_pkg,
'ovs_br' : node.get_all_ovs_brs(),
'bonds' : node.get_all_bonds(),
'br-int' : const.BR_NAME_INT,
'fuel_cluster_id' : str(node.fuel_cluster_id),
'interfaces' : node.get_all_interfaces(),
'br_fw_admin' : node.br_fw_admin,
'pxe_interface' : node.pxe_interface,
'br_fw_admin_address' : node.br_fw_admin_address,
'br_fw_admin_gw' : node.setup_node_ip,
'uplinks' : node.get_all_uplinks()})
bash_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
with open(bash_script_path, "w") as bash_file:
bash_file.write(bash)
node.set_bash_script_path(bash_script_path)
# generate puppet script
ivs_daemon_args = (const.IVS_DAEMON_ARGS %
{'inband_vlan' : const.INBAND_VLAN,
'internal_ports' : node.get_ivs_internal_ports(),
'uplink_interfaces' : node.get_uplink_intfs_for_ivs()})
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(puppet_template_dir)s/%(puppet_template)s_%(role)s.pp''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'puppet_template_dir' : const.PUPPET_TEMPLATE_DIR,
'puppet_template' : const.UBUNTU,
'role' : node.role}), "r") as puppet_template_file:
puppet_template = puppet_template_file.read()
puppet = (puppet_template %
{'ivs_daemon_args' : ivs_daemon_args,
'network_vlan_ranges' : node.get_network_vlan_ranges(),
'bcf_controllers' : node.get_controllers_for_neutron(),
'bcf_controller_user' : node.bcf_controller_user,
'bcf_controller_passwd' : node.bcf_controller_passwd,
'port_ips' : node.get_ivs_internal_port_ips(),
'setup_node_ip' : node.setup_node_ip})
puppet_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s.pp''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
with open(puppet_script_path, "w") as puppet_file:
puppet_file.write(puppet)
node.set_puppet_script_path(puppet_script_path)
# generate ospurge script
if node.role != const.ROLE_NEUTRON_SERVER:
return
openrc = const.MANUAL_OPENRC
if node.fuel_cluster_id:
openrc = const.FUEL_OPENRC
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(ospurge_template_dir)s/%(ospurge_template)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'ospurge_template_dir' : const.OSPURGE_TEMPLATE_DIR,
'ospurge_template' : "purge_all"}), "r") as ospurge_template_file:
ospurge_template = ospurge_template_file.read()
ospurge = (ospurge_template % {'openrc' : openrc})
ospurge_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s_ospurge.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
with open(ospurge_script_path, "w") as ospurge_file:
ospurge_file.write(ospurge)
node.set_ospurge_script_path(ospurge_script_path)
@staticmethod
def generate_scripts_for_centos(node):
# generate bash script
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(bash_template_dir)s/%(bash_template)s_%(os_version)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'bash_template_dir' : const.BASH_TEMPLATE_DIR,
'bash_template' : const.CENTOS,
'os_version' : node.os_version}), "r") as bash_template_file:
bash_template = bash_template_file.read()
is_controller = False
if node.role == const.ROLE_NEUTRON_SERVER:
is_controller = True
bash = (bash_template %
{'install_ivs' : str(node.install_ivs).lower(),
'install_bsnstacklib' : str(node.install_bsnstacklib).lower(),
'install_all' : str(node.install_all).lower(),
'deploy_dhcp_agent' : str(node.deploy_dhcp_agent).lower(),
'is_controller' : str(is_controller).lower(),
'deploy_horizon_patch': str(node.deploy_horizon_patch).lower(),
'ivs_version' : node.ivs_version,
'bsnstacklib_version' : node.bsnstacklib_version,
'dst_dir' : node.dst_dir,
'hostname' : node.hostname,
'ivs_pkg' : node.ivs_pkg,
'horizon_patch' : node.horizon_patch,
'horizon_patch_dir' : node.horizon_patch_dir,
'horizon_base_dir' : node.horizon_base_dir,
'ivs_debug_pkg' : node.ivs_debug_pkg,
'ovs_br' : node.get_all_ovs_brs(),
'bonds' : node.get_all_bonds(),
'br-int' : const.BR_NAME_INT})
bash_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
with open(bash_script_path, "w") as bash_file:
bash_file.write(bash)
node.set_bash_script_path(bash_script_path)
# generate puppet script
ivs_daemon_args = (const.IVS_DAEMON_ARGS %
{'inband_vlan' : const.INBAND_VLAN,
'internal_ports' : node.get_ivs_internal_ports(),
'uplink_interfaces' : node.get_uplink_intfs_for_ivs()})
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(puppet_template_dir)s/%(puppet_template)s_%(role)s.pp''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'puppet_template_dir' : const.PUPPET_TEMPLATE_DIR,
'puppet_template' : const.CENTOS,
'role' : node.role}), "r") as puppet_template_file:
puppet_template = puppet_template_file.read()
puppet = (puppet_template %
{'ivs_daemon_args' : ivs_daemon_args,
'network_vlan_ranges' : node.get_network_vlan_ranges(),
'bcf_controllers' : node.get_controllers_for_neutron(),
'bcf_controller_user' : node.bcf_controller_user,
'bcf_controller_passwd' : node.bcf_controller_passwd,
'selinux_mode' : node.selinux_mode,
'port_ips' : node.get_ivs_internal_port_ips()})
puppet_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s.pp''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
with open(puppet_script_path, "w") as puppet_file:
puppet_file.write(puppet)
node.set_puppet_script_path(puppet_script_path)
# generate selinux script
selinux_script_path = (r'''%(setup_node_dir)s/%(generated_script_dir)s/%(hostname)s.te''' %
{'setup_node_dir' : node.setup_node_dir,
'generated_script_dir' : const.GENERATED_SCRIPT_DIR,
'hostname' : node.hostname})
subprocess.call(r'''cp %(setup_node_dir)s/%(deploy_mode)s/%(selinux_template_dir)s/%(selinux_template)s.te %(selinux_script_path)s''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'selinux_template_dir' : const.SELINUX_TEMPLATE_DIR,
'selinux_template' : const.CENTOS,
'selinux_script_path' : selinux_script_path}, shell=True)
node.set_selinux_script_path(selinux_script_path)
# generate ospurge script
if node.role != const.ROLE_NEUTRON_SERVER:
return
openrc = const.PACKSTACK_OPENRC
if node.fuel_cluster_id:
openrc = const.FUEL_OPENRC
with open((r'''%(setup_node_dir)s/%(deploy_mode)s/%(ospurge_template_dir)s/%(ospurge_template)s.sh''' %
{'setup_node_dir' : node.setup_node_dir,
'deploy_mode' : node.deploy_mode,
'ospurge_template_dir' : const.OSPURGE_TEMPLATE_DIR,
'ospurge_template' : "purge_all"}), "r")
}
)
@signin_required()
def get_workflow_logs(workflow_id_or_name, user, **kwargs): # noqa
r"""Get workflow logs.
---
get:
summary: Get workflow logs of a workflow.
description: >-
This resource returns the logs of a workflow.
Resource is expecting a workflow UUID.
operationId: get_workflow_logs
produces:
- application/json
parameters:
- name: access_token
in: query
description: API access_token of workflow owner.
required: false
type: string
- name: workflow_id_or_name
in: path
description: Required. Analysis UUID or name.
required: true
type: string
- name: steps
in: body
description: Steps of a workflow.
required: false
schema:
type: array
description: List of step names to get logs for.
items:
type: string
description: step name.
- name: page
in: query
description: Results page number (pagination).
required: false
type: integer
- name: size
in: query
description: Number of results per page (pagination).
required: false
type: integer
responses:
200:
description: >-
Request succeeded. Info about a workflow, including the status is
returned.
schema:
type: object
properties:
workflow_id:
type: string
workflow_name:
type: string
logs:
type: string
user:
type: string
examples:
application/json:
{
"workflow_id": "256b25f4-4cfb-4684-b7a8-73872ef455a1",
"workflow_name": "mytest.1",
"logs": "<Workflow engine log output>",
"user": "00000000-0000-0000-0000-000000000000"
}
400:
description: >-
Request failed. The incoming data specification seems malformed.
examples:
application/json:
{
"message": "Malformed request."
}
403:
description: >-
Request failed. User is not allowed to access workflow.
examples:
application/json:
{
"message": "User 00000000-0000-0000-0000-000000000000
is not allowed to access workflow
256b25f4-4cfb-4684-b7a8-73872ef455a1"
}
404:
description: >-
Request failed. User does not exist.
examples:
application/json:
{
"message": "Workflow cdcf48b1-c2f3-4693-8230-b066e088c6ac does
not exist"
}
500:
description: >-
Request failed. Internal controller error.
"""
try:
steps = request.json or None
if not workflow_id_or_name:
raise ValueError("workflow_id_or_name is not supplied")
response, http_response = current_rwc_api_client.api.get_workflow_logs(
user=str(user.id_),
steps=steps or None,
workflow_id_or_name=workflow_id_or_name,
**kwargs,
).result()
return jsonify(response), http_response.status_code
except HTTPError as e:
logging.error(traceback.format_exc())
return jsonify(e.response.json()), e.response.status_code
except ValueError as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 403
except Exception as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows/<workflow_id_or_name>/status", methods=["GET"])
@signin_required()
def get_workflow_status(workflow_id_or_name, user): # noqa
r"""Get workflow status.
---
get:
summary: Get status of a workflow.
description: >-
This resource reports the status of a workflow.
Resource is expecting a workflow UUID.
operationId: get_workflow_status
produces:
- application/json
parameters:
- name: workflow_id_or_name
in: path
description: Required. Analysis UUID or name.
required: true
type: string
- name: access_token
in: query
description: The API access_token of workflow owner.
required: false
type: string
responses:
200:
description: >-
Request succeeded. Info about a workflow, including the status is
returned.
schema:
type: object
properties:
id:
type: string
name:
type: string
created:
type: string
status:
type: string
user:
type: string
progress:
type: object
logs:
type: string
examples:
application/json:
{
"created": "2018-10-29T12:50:12",
"id": "4e576cf9-a946-4346-9cde-7712f8dcbb3f",
"logs": "",
"name": "mytest.1",
"progress": {
"current_command": None,
"current_step_name": None,
"failed": {"job_ids": [], "total": 0},
"finished": {"job_ids": [], "total": 0},
"run_started_at": "2018-10-29T12:51:04",
"running": {"job_ids": [], "total": 0},
"total": {"job_ids": [], "total": 1}
},
"status": "running",
"user": "00000000-0000-0000-0000-000000000000"
}
400:
description: >-
Request failed. The incoming payload seems malformed.
examples:
application/json:
{
"message": "Malformed request."
}
403:
description: >-
Request failed. User is not allowed to access workflow.
examples:
application/json:
{
"message": "User 00000000-0000-0000-0000-000000000000
is not allowed to access workflow
256b25f4-4cfb-4684-b7a8-73872ef455a1"
}
404:
description: >-
Request failed. Either User or Analysis does not exist.
examples:
application/json:
{
"message": "Analysis 256b25f4-4cfb-4684-b7a8-73872ef455a1 does
not exist."
}
500:
description: >-
Request failed. Internal controller error.
"""
try:
if not workflow_id_or_name:
raise ValueError("workflow_id_or_name is not supplied")
response, http_response = current_rwc_api_client.api.get_workflow_status(
user=str(user.id_), workflow_id_or_name=workflow_id_or_name
).result()
return jsonify(response), http_response.status_code
except HTTPError as e:
logging.error(traceback.format_exc())
return jsonify(e.response.json()), e.response.status_code
except ValueError as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 403
except Exception as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows/<workflow_id_or_name>/start", methods=["POST"])
@signin_required()
@check_quota
def start_workflow(workflow_id_or_name, user): # noqa
r"""Start workflow.
---
post:
summary: Start workflow.
description: >-
This resource starts the workflow execution process.
Resource is expecting a workflow UUID.
operationId: start_workflow
consumes:
- application/json
produces:
- application/json
parameters:
- name: workflow_id_or_name
in: path
description: Required. Analysis UUID or name.
required: true
type: string
- name: access_token
in: query
description: The API access_token of workflow owner.
required: false
type: string
- name: parameters
in: body
description: >-
Optional. Additional input parameters and operational options.
required: false
schema:
type: object
responses:
200:
description: >-
Request succeeded. Info about a workflow, including the execution
status is returned.
schema:
type: object
properties:
message:
type: string
workflow_id:
type: string
workflow_name:
type: string
status:
type: string
user:
type: string
examples:
application/json:
{
"message": "Workflow submitted",
"id": "256b25f4-4cfb-4684-b7a8-73872ef455a1",
"workflow_name": "mytest.1",
"status": "queued",
"user": "00000000-0000-0000-0000-000000000000"
}
400:
description: >-
Request failed. The incoming payload seems malformed.
examples:
application/json:
{
"message": "Malformed request."
}
403:
description: >-
Request failed. User is not allowed to access workflow.
examples:
application/json:
{
"message": "User 00000000-0000-0000-0000-000000000000
is not allowed to access workflow
256b25f4-4cfb-4684-b7a8-73872ef455a1"
}
404:
description: >-
Request failed. Either User or Workflow does not exist.
examples:
application/json:
{
"message": "Workflow 256b25f4-4cfb-4684-b7a8-73872ef455a1
does not exist"
}
409:
description: >-
Request failed. The workflow could not be started due to a
conflict.
examples:
application/json:
{
"message": "Workflow 256b25f4-4cfb-4684-b7a8-73872ef455a1
could not be started because it is already
running."
}
500:
description: >-
Request failed. Internal controller error.
501:
description: >-
Request failed. The specified status change is not implemented.
examples:
application/json:
{
"message": "Status resume is not supported yet."
}
"""
def _load_yadage_spec(workflow, operational_options):
"""Load and save in DB the Yadage workflow specification."""
operational_options.update({"accept_metadir": True})
toplevel = operational_options.get("toplevel", "")
workflow.reana_specification = yadage_load_from_workspace(
workflow.workspace_path, workflow.reana_specification, toplevel,
)
Session.commit()
def _calculate_complexity(workflow):
"""Place workflow in queue and calculate and set its complexity."""
complexity = estimate_complexity(workflow.type_, workflow.reana_specification)
workflow.complexity = complexity
workflow.status = RunStatus.queued
Session.commit()
return complexity
try:
if not workflow_id_or_name:
raise ValueError("workflow_id_or_name is not supplied")
parameters = request.json
workflow = _get_workflow_with_uuid_or_name(workflow_id_or_name, str(user.id_))
operational_options = parameters.get("operational_options", {})
operational_options = validate_operational_options(
workflow.type_, operational_options
)
restart_type = None
if "restart" in parameters:
if workflow.status not in [RunStatus.finished, RunStatus.failed]:
raise ValueError("Only finished or failed workflows can be restarted.")
restart_type = (
parameters.get("reana_specification", {})
.get("workflow", {})
.get("type", None)
)
workflow = clone_workflow(
workflow, parameters.get("reana_specification", None), restart_type
)
elif workflow.status != RunStatus.created:
raise ValueError(
"Workflow {} is already {} and cannot be started "
"again.".format(workflow.get_full_workflow_name(), workflow.status.name)
)
if "yadage" in (workflow.type_, restart_type):
_load_yadage_spec(workflow, operational_options)
complexity = _calculate_complexity(workflow)
total_cluster_memory = NodesStatus().get_total_memory()
workflow_priority = workflow.get_priority(total_cluster_memory)
workflow_min_job_memory = get_workflow_min_job_memory(complexity)
current_workflow_submission_publisher.publish_workflow_submission(
user_id=str(user.id_),
workflow_id_or_name=workflow.get_full_workflow_name(),
parameters=parameters,
priority=workflow_priority,
min_job_memory=workflow_min_job_memory,
)
response = {
"message": "Workflow submitted.",
"workflow_id": workflow.id_,
"workflow_name": workflow.name,
"status": RunStatus.queued.name,
"run_number": workflow.run_number,
"user": str(user.id_),
}
return jsonify(response), 200
except HTTPError as e:
logging.error(traceback.format_exc())
return jsonify(e.response.json()), e.response.status_code
except REANAValidationError as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 400
except ValueError as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 403
except Exception as e:
logging.error(traceback.format_exc())
return jsonify({"message": str(e)}), 500
@blueprint.route("/workflows/<workflow_id_or_name>/status", methods=["PUT"])
@signin_required()
def set_workflow_status(workflow_id_or_name, user): # noqa
r"""Set workflow status.
---
put:
summary: Set status of a workflow.
description: >-
This resource sets the status of a workflow.
Resource is expecting a workflow UUID.
operationId: set_workflow_status
consumes:
- application/json
produces:
- application/json
parameters:
- name: workflow_id_or_name
in: path
description: Required. Analysis UUID or name.
required: true
type: string
- name: status
in: query
description: Required. New workflow status.
required: true
type: string
- name: access_token
in: query
description: The API access_token of workflow owner.
required: false
type: string
- name: parameters
in: body
description: >-
Optional. Additional input parameters and operational options.
required: false
schema:
type: object
responses:
200:
description: >-
Request succeeded. Info about a workflow, including the status is
returned.
schema:
type: object
properties:
message:
type: string
workflow_id:
type: string
workflow_name:
type: string
status:
type: string
user:
type: string
examples:
application/json:
{
"message": "Workflow successfully launched",
"id": "256b25f4-4cfb-4684-b7a8-73872ef455a1",
"workflow_name": "mytest.1",
"status": "created",
"user": "00000000-0000-0000-0000-000000000000"
}
400:
description: >-
Request failed. The incoming payload seems malformed.
examples:
application/json:
{
"message": "Malformed request."
}
403:
description: >-
Request failed. User is not allowed to access workflow.
examples:
application/json:
{
"message": "User 00000000-0000-0000-0000-000000000000
is not allowed to access workflow
256b25f4-4cfb-4684-b7a8-73872ef455a1"
}
404:
description: >-
Request failed. Either User or Workflow does not exist.
examples:
application/json:
{
"message": "Workflow 256b25f4-4cfb-4684-b7a8-73872ef455a1
does not exist"
}
409:
description: >-
Request | |
import datetime
import pytz
import requests
from unittest.mock import patch
from . import TembaClient
from .types import Broadcast, Group, FlowDefinition
from ..exceptions import TembaException, TembaNoSuchObjectError, TembaMultipleResultsError, TembaBadRequestError
from ..exceptions import TembaConnectionError
from ..tests import TembaTest, MockResponse
@patch("temba_client.clients.request")
class TembaClientTest(TembaTest):
API_VERSION = 1
def setUp(self):
self.client = TembaClient("example.com", "1234567890", user_agent="test/0.1")
def test_add_contacts(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.add_contacts(
contacts=["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"], group="Testers"
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"],
"action": "add",
"group": "Testers",
}
self.assertRequest(mock_request, "post", "contact_actions", data=expected_body)
def test_archive_contacts(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.archive_contacts(
contacts=["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"]
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"],
"action": "archive",
}
self.assertRequest(mock_request, "post", "contact_actions", data=expected_body)
def test_archive_messages(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.archive_messages(messages=[123, 234, 345])
expected_body = {"messages": [123, 234, 345], "action": "archive"}
self.assertRequest(mock_request, "post", "message_actions", data=expected_body)
def test_block_contacts(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.block_contacts(
contacts=["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"]
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"],
"action": "block",
}
self.assertRequest(mock_request, "post", "contact_actions", data=expected_body)
def test_create_broadcast(self, mock_request):
# check by group UUID
mock_request.return_value = MockResponse(200, self.read_json("broadcasts_created"))
broadcast = self.client.create_broadcast("Howdy", groups=["04a4752b-0f49-480e-ae60-3a3f2bea485c"])
expected_body = {"text": "Howdy", "groups": ["04a4752b-0f49-480e-ae60-3a3f2bea485c"]}
self.assertRequest(mock_request, "post", "broadcasts", data=expected_body)
self.assertEqual(broadcast.id, 234252)
self.assertEqual(broadcast.urns, [1234])
self.assertEqual(broadcast.contacts, [])
self.assertEqual(broadcast.groups, ["04a4752b-0f49-480e-ae60-3a3f2bea485c"])
self.assertEqual(broadcast.text, "Howdy")
self.assertEqual(broadcast.status, "Q")
self.assertEqual(broadcast.created_on, datetime.datetime(2014, 12, 12, 22, 56, 58, 917000, pytz.utc))
def test_create_campaign(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("campaigns_created"))
campaign = self.client.create_campaign("Reminders", group="591de2c3-66bb-471b-9c9a-761b49a5ca69")
expected_body = {"name": "Reminders", "group_uuid": "591de2c3-66bb-471b-9c9a-761b49a5ca69"}
self.assertRequest(mock_request, "post", "campaigns", data=expected_body)
self.assertEqual(campaign.uuid, "9ccae91f-b3f8-4c18-ad92-e795a2332c11")
def test_create_contact(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("contacts_created"))
contact = self.client.create_contact(
"<NAME>",
["tel:+250700000005"],
{"nickname": "Triple A"},
["04a4752b-0f49-480e-ae60-3a3f2bea485c"],
)
expected_body = {
"name": "<NAME>",
"urns": ["tel:+250700000005"],
"fields": {"nickname": "Triple A"},
"group_uuids": ["04a4752b-0f49-480e-ae60-3a3f2bea485c"],
}
self.assertRequest(mock_request, "post", "contacts", data=expected_body)
self.assertEqual(contact.uuid, "bfff9984-38f4-4e59-998d-3663ec3c650d")
self.assertEqual(contact.name, "<NAME>")
self.assertEqual(contact.urns, ["tel:+250700000005"])
self.assertEqual(contact.groups, ["04a4752b-0f49-480e-ae60-3a3f2bea485c"])
self.assertEqual(contact.fields, {"nickname": "Triple A"})
self.assertEqual(contact.language, None)
self.assertEqual(contact.modified_on, datetime.datetime(2014, 10, 1, 6, 54, 9, 817000, pytz.utc))
def test_create_event(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("events_created"))
event = self.client.create_event("9ccae91f-b3f8-4c18-ad92-e795a2332c11", "EDD", 14, "D", -1, "Howdy")
expected_body = {
"campaign_uuid": "9ccae91f-b3f8-4c18-ad92-e795a2332c11",
"relative_to": "EDD",
"offset": 14,
"unit": "D",
"delivery_hour": -1,
"message": "Howdy",
}
self.assertRequest(mock_request, "post", "events", data=expected_body)
self.assertEqual(event.uuid, "9e6beda-0ce2-46cd-8810-91157f261cbd")
def test_create_field(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("fields_created"))
field = self.client.create_field("Chat Name", "T")
expected_body = {"label": "Chat Name", "value_type": "T"}
self.assertRequest(mock_request, "post", "fields", data=expected_body)
self.assertEqual(field.key, "chat_name")
self.assertEqual(field.label, "Chat Name")
self.assertEqual(field.value_type, "T")
# with key provided
field = self.client.create_field("Chat Name", "T", key="chat_name")
expected_body = {"label": "Chat Name", "value_type": "T", "key": "chat_name"}
self.assertRequest(mock_request, "post", "fields", data=expected_body)
self.assertEqual(field.key, "chat_name")
self.assertEqual(field.label, "Chat Name")
self.assertEqual(field.value_type, "T")
def test_create_label(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("labels_created"))
label = self.client.create_label("Really High Priority")
expected_body = {"name": "Really High Priority"}
self.assertRequest(mock_request, "post", "labels", data=expected_body)
self.assertEqual(label.uuid, "affa6685-0725-49c7-a15a-96f301d996e4")
self.assertEqual(label.name, "Really High Priority")
self.assertEqual(label.count, 0)
def test_create_runs(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("runs_created"))
runs = self.client.create_runs(
"04a4752b-0f49-480e-ae60-3a3f2bea485c", ["bfff9984-38f4-4e59-998d-3663ec3c650d"], True
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d"],
"restart_participants": 1,
"flow_uuid": "04a4752b-0f49-480e-ae60-3a3f2bea485c",
}
self.assertRequest(mock_request, "post", "runs", data=expected_body)
runs = self.client.create_runs(
"04a4752b-0f49-480e-ae60-3a3f2bea485c",
["bfff9984-38f4-4e59-998d-3663ec3c650d"],
True,
extra={"variable": "value"},
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d"],
"restart_participants": 1,
"flow_uuid": "04a4752b-0f49-480e-ae60-3a3f2bea485c",
"extra": {"variable": "value"},
}
self.assertRequest(mock_request, "post", "runs", data=expected_body)
self.assertEqual(len(runs), 2)
def test_delete_contact(self, mock_request):
# check deleting an existing contact
mock_request.return_value = MockResponse(204, "")
self.client.delete_contact("bfff9984-38f4-4e59-998d-3663ec3c650d")
self.assertRequest(mock_request, "delete", "contacts", params={"uuid": "bfff9984-38f4-4e59-998d-3663ec3c650d"})
# check deleting a non-existent contact
mock_request.return_value = MockResponse(404, "NOT FOUND")
self.assertRaises(TembaNoSuchObjectError, self.client.delete_contact, "bfff9984-38f4-4e59-998d-3663ec3c650d")
def test_delete_contacts(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.delete_contacts(
contacts=["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"]
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"],
"action": "delete",
}
self.assertRequest(mock_request, "post", "contact_actions", data=expected_body)
def test_delete_event(self, mock_request):
mock_request.return_value = MockResponse(204, "")
self.client.delete_event("bfff9984-38f4-4e59-998d-3663ec3c650d")
self.assertRequest(mock_request, "delete", "events", params={"uuid": "bfff9984-38f4-4e59-998d-3663ec3c650d"})
def test_delete_messages(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.delete_messages(messages=[123, 234, 345])
expected_body = {"messages": [123, 234, 345], "action": "delete"}
self.assertRequest(mock_request, "post", "message_actions", data=expected_body)
def test_expire_contacts(self, mock_request):
mock_request.return_value = MockResponse(204)
self.client.expire_contacts(
contacts=["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"]
)
expected_body = {
"contacts": ["bfff9984-38f4-4e59-998d-3663ec3c650d", "7a165fe9-575b-4d15-b2ac-58fec913d603"],
"action": "expire",
}
self.assertRequest(mock_request, "post", "contact_actions", data=expected_body)
def test_get_boundaries(self, mock_request):
mock_request.return_value = MockResponse(200, self.read_json("boundaries_multiple"))
boundaries = self.client.get_boundaries()
self.assertRequest(mock_request, "get", "boundaries")
self.assertEqual(len(boundaries), 2)
boundary1 = boundaries[0]
boundary2 = boundaries[1]
self.assertEqual(boundary1.boundary, "R195269")
self.assertEqual(boundary1.name, "Burundi")
self.assertEqual(boundary1.level, 0)
self.assertFalse(boundary1.parent)
self.assertEqual(boundary1.geometry.type, "MultiPolygon")
self.assertIsInstance(boundary1.geometry.coordinates, list)
self.assertEqual(boundary2.level, 1)
self.assertEqual(boundary2.parent, "R195269")
def test_get_broadcast(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("broadcasts_single"))
broadcast = self.client.get_broadcast(1234)
self.assertRequest(mock_request, "get", "broadcasts", params={"id": 1234})
self.assertEqual(broadcast.id, 1234)
self.assertEqual(broadcast.urns, [55454])
self.assertEqual(broadcast.contacts, [])
self.assertEqual(broadcast.groups, ["04a4752b-0f49-480e-ae60-3a3f2bea485c"])
self.assertEqual(broadcast.text, "Hello")
self.assertEqual(broadcast.created_on, datetime.datetime(2014, 11, 12, 22, 56, 58, 917000, pytz.utc))
self.assertEqual(broadcast.status, "Q")
def test_get_broadcasts(self, mock_request):
# check no params
mock_request.return_value = MockResponse(200, self.read_json("broadcasts_multiple"))
broadcasts = self.client.get_broadcasts()
self.assertRequest(mock_request, "get", "broadcasts")
self.assertEqual(len(broadcasts), 2)
self.assertEqual(broadcasts[0].id, 1234)
# check all params
self.client.get_broadcasts(
ids=[1234, 2345],
statuses=["P", "Q"],
before=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc),
after=datetime.datetime(2014, 12, 12, 22, 34, 36, 234000, pytz.utc),
)
self.assertRequest(
mock_request,
"get",
"broadcasts",
params={
"id": [1234, 2345],
"status": ["P", "Q"],
"before": "2014-12-12T22:34:36.123000Z",
"after": "2014-12-12T22:34:36.234000Z",
},
)
def test_get_campaign(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("campaigns_single"))
campaign = self.client.get_campaign("9ccae91f-b3f8-4c18-ad92-e795a2332c11")
self.assertRequest(mock_request, "get", "campaigns", params={"uuid": "9ccae91f-b3f8-4c18-ad92-e795a2332c11"})
self.assertEqual(campaign.uuid, "9ccae91f-b3f8-4c18-ad92-e795a2332c11")
self.assertEqual(campaign.name, "Mother Reminders")
self.assertEqual(campaign.group, "591de2c3-66bb-471b-9c9a-761b49a5ca69")
self.assertEqual(campaign.created_on, datetime.datetime(2015, 6, 8, 12, 18, 7, 671000, pytz.utc))
# check empty response
mock_request.return_value = MockResponse(200, self.read_json("empty"))
self.assertRaises(TembaNoSuchObjectError, self.client.get_campaign, "xyz")
# check multiple item response
mock_request.return_value = MockResponse(200, self.read_json("campaigns_multiple"))
self.assertRaises(TembaMultipleResultsError, self.client.get_campaign, "9ccae91f-b3f8-4c18-ad92-e795a2332c11")
def test_get_campaigns(self, mock_request):
# check no params
mock_request.return_value = MockResponse(200, self.read_json("campaigns_multiple"))
campaigns = self.client.get_campaigns()
self.assertRequest(mock_request, "get", "campaigns")
self.assertEqual(len(campaigns), 2)
self.assertEqual(campaigns[0].uuid, "9ccae91f-b3f8-4c18-ad92-e795a2332c11")
def test_get_contact(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("contacts_single"))
contact = self.client.get_contact("bfff9984-38f4-4e59-998d-3663ec3c650d")
self.assertRequest(mock_request, "get", "contacts", params={"uuid": "bfff9984-38f4-4e59-998d-3663ec3c650d"})
self.assertEqual(contact.uuid, "bfff9984-38f4-4e59-998d-3663ec3c650d")
self.assertEqual(contact.name, "<NAME>")
self.assertEqual(contact.urns, ["tel:+250700000001"])
self.assertEqual(contact.groups, ["04a4752b-0f49-480e-ae60-3a3f2bea485c"])
self.assertEqual(contact.fields, {"nickname": "Hannibal"})
self.assertEqual(contact.language, None)
self.assertEqual(contact.blocked, False)
self.assertEqual(contact.failed, False)
self.assertEqual(contact.modified_on, datetime.datetime(2014, 10, 1, 6, 54, 9, 817000, pytz.utc))
# check empty response
mock_request.return_value = MockResponse(200, self.read_json("empty"))
self.assertRaises(TembaNoSuchObjectError, self.client.get_contact, "xyz")
# check multiple item response
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.assertRaises(TembaMultipleResultsError, self.client.get_contact, "bfff9984-38f4-4e59-998d-3663ec3c650d")
def test_get_contacts(self, mock_request):
# check no params
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
contacts = self.client.get_contacts()
self.assertRequest(mock_request, "get", "contacts")
self.assertEqual(len(contacts), 4)
self.assertEqual(contacts[0].uuid, "bfff9984-38f4-4e59-998d-3663ec3c650d")
# check filtering by group_uuids
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.client.get_contacts(groups=["abc"])
self.assertRequest(mock_request, "get", "contacts", params={"group_uuids": ["abc"]})
# check filtering by group object
group1 = Group.create(name="A-Team", uuid="xyz", size=4)
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.client.get_contacts(groups=[group1])
self.assertRequest(mock_request, "get", "contacts", params={"group_uuids": ["xyz"]})
# check filtering modified after a date
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.client.get_contacts(after=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc))
self.assertRequest(mock_request, "get", "contacts", params={"after": "2014-12-12T22:34:36.123000Z"})
# check filtering modified before a date
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.client.get_contacts(before=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc))
self.assertRequest(mock_request, "get", "contacts", params={"before": "2014-12-12T22:34:36.123000Z"})
# check filtering modified between dates
mock_request.return_value = MockResponse(200, self.read_json("contacts_multiple"))
self.client.get_contacts(
after=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc),
before=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc),
)
self.assertRequest(
mock_request,
"get",
"contacts",
params={"after": "2014-12-12T22:34:36.123000Z", "before": "2014-12-12T22:34:36.123000Z"},
)
# check multiple pages
mock_request.side_effect = (
MockResponse(200, self.read_json("contacts_multipage_1")),
MockResponse(200, self.read_json("contacts_multipage_2")),
MockResponse(200, self.read_json("contacts_multipage_3")),
)
contacts = self.client.get_contacts(after=datetime.datetime(2014, 12, 12, 22, 34, 36, 123000, pytz.utc))
self.assertEqual(len(contacts), 21)
self.assertRequestURL(
mock_request, "get", "https://example.com/api/v1/contacts.json?page=3&before=2014-12-12T22:34:36.123"
)
# test with paging
mock_request.side_effect = (
MockResponse(200, self.read_json("contacts_multipage_1")),
MockResponse(200, self.read_json("contacts_multipage_2")),
MockResponse(200, self.read_json("contacts_multipage_3")),
)
pager = self.client.pager()
contacts = self.client.get_contacts(pager=pager)
self.assertEqual(len(contacts), 10)
self.assertEqual(pager.total, 21)
self.assertTrue(pager.has_more())
contacts = self.client.get_contacts(pager=pager)
self.assertEqual(len(contacts), 10)
self.assertEqual(pager.total, 21)
self.assertTrue(pager.has_more())
contacts = self.client.get_contacts(pager=pager)
self.assertEqual(len(contacts), 1)
self.assertEqual(pager.total, 21)
self.assertFalse(pager.has_more())
# test asking for explicit page
mock_request.return_value = MockResponse(200, self.read_json("contacts_multipage_2"))
mock_request.side_effect = None
pager = self.client.pager(start_page=2)
contacts = self.client.get_contacts(pager=pager)
self.assertEqual(len(contacts), 10)
self.assertRequest(mock_request, "get", "contacts", params={"page": 2})
# test with connection error
mock_request.side_effect = requests.exceptions.ConnectionError
self.assertRaises(TembaConnectionError, self.client.get_contacts)
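# Hedged illustration (not a test): how the pager exercised above would typically be used against
# a live server; the host and token are the same placeholder credentials as in setUp.
def _example_paging_usage(self):
    client = TembaClient("example.com", "1234567890")
    pager = client.pager()
    all_contacts = []
    while True:
        all_contacts += client.get_contacts(pager=pager)
        if not pager.has_more():
            break
    return all_contacts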
def test_get_event(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("events_single"))
event = self.client.get_event("9e6beda-0ce2-46cd-8810-91157f261cbd")
self.assertRequest(mock_request, "get", "events", params={"uuid": "9e6beda-0ce2-46cd-8810-91157f261cbd"})
self.assertEqual(event.uuid, "9e6beda-0ce2-46cd-8810-91157f261cbd")
self.assertEqual(event.campaign, "9ccae91f-b3f8-4c18-ad92-e795a2332c11")
self.assertEqual(event.relative_to, "EDD")
self.assertEqual(event.offset, 14)
self.assertEqual(event.unit, "D")
self.assertEqual(event.delivery_hour, -1)
self.assertEqual(event.message, "")
self.assertEqual(event.flow, "70c38f94-ab42-4666-86fd-3c76139110d3")
self.assertEqual(event.created_on, datetime.datetime(2015, 6, 8, 12, 18, 7, 671000, pytz.utc))
# check empty response
mock_request.return_value = MockResponse(200, self.read_json("empty"))
self.assertRaises(TembaNoSuchObjectError, self.client.get_event, "xyz")
# check multiple item response
mock_request.return_value = MockResponse(200, self.read_json("events_multiple"))
self.assertRaises(TembaMultipleResultsError, self.client.get_event, "9e6beda-0ce2-46cd-8810-91157f261cbd")
def test_get_events(self, mock_request):
# check no params
mock_request.return_value = MockResponse(200, self.read_json("events_multiple"))
events = self.client.get_events()
self.assertRequest(mock_request, "get", "events")
self.assertEqual(len(events), 2)
self.assertEqual(events[0].uuid, "9e6beda-0ce2-46cd-8810-91157f261cbd")
def test_get_field(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("fields_single"))
field = self.client.get_field("chat_name")
self.assertRequest(mock_request, "get", "fields", params={"key": "chat_name"})
self.assertEqual(field.label, "Chat Name")
self.assertEqual(field.value_type, "T")
# check empty response
mock_request.return_value = MockResponse(200, self.read_json("empty"))
self.assertRaises(TembaNoSuchObjectError, self.client.get_field, "xyz")
# check multiple item response
mock_request.return_value = MockResponse(200, self.read_json("fields_multiple"))
self.assertRaises(TembaMultipleResultsError, self.client.get_field, "chat_name")
def test_get_fields(self, mock_request):
# check no params
mock_request.return_value = MockResponse(200, self.read_json("fields_multiple"))
fields = self.client.get_fields()
self.assertRequest(mock_request, "get", "fields")
self.assertEqual(len(fields), 2)
self.assertEqual(fields[0].key, "chat_name")
def test_get_flow(self, mock_request):
# check single item response
mock_request.return_value = MockResponse(200, self.read_json("flows_single"))
flow = self.client.get_flow("a68567fa-ad95-45fc-b5f7-3ce90ebbd46d")
self.assertRequest(mock_request, "get", "flows", params={"uuid": "a68567fa-ad95-45fc-b5f7-3ce90ebbd46d"})
self.assertEqual(flow.uuid, "a68567fa-ad95-45fc-b5f7-3ce90ebbd46d")
self.assertEqual(flow.name, "Ping")
self.assertEqual(flow.archived, False)
| |
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
from typing import Union, Optional, OrderedDict
Array = Union[list, np.ndarray]
# ref: https://stackoverflow.com/questions/47074423/how-to-get-default-blue-colour-of-matplotlib-pyplot-scatter/47074742
MY_DEFAULT_BLUE: str = '#1f77b4'  # I like this blue, but note that the default palette might change
MDB: str = MY_DEFAULT_BLUE
def plot_quick(y: Array, xlabel: str, ylabel: str, linewidth: float = 2.0, show: bool = False, save_plot: bool = False,
plot_name: str = 'plot'):
"""
Plots y against its indices.
"""
plt.plot(y, lw=linewidth) # lw is linewidth
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
plt.tight_layout()
# note: needs to be done in this order or it will clear the plot.
if save_plot:
save_to_desktop(plot_name)
if show:
plt.show()
def _plot(x: Array, y: Array, xlabel: str, ylabel: str,
linewidth: float = 2.0, show: bool = False,
save_plot: bool = False, plot_filename: str = 'plot', title: Optional[str] = None):
"""
Plots y against x.
"""
plt.plot(x, y, lw=linewidth) # lw is linewidth
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.grid(True)
plt.tight_layout()
# - optionals
plt.title(title)
# note: needs to be done in this order or it will clear the plot.
if save_plot:
save_to_desktop(plot_filename)
if show:
plt.show()
def plot(x: Array, y: Array, xlabel: str, ylabel: str,
linewidth: float = 2.0, show: bool = False,
save_plot: bool = False, plot_filename: str = 'plot', title: Optional[str] = None,
label: Optional[str] = None,
y_hline: Optional[float] = None, y_hline_label: Optional[str] = None,
x_hline: Optional[float] = None, x_hline_label: Optional[str] = None,
new_plot: bool = False, marker: Optional = None, color: Optional = None,
tight_layout: bool = False
):
"""
Nice easy plot function to quickly plot x vs y and label the x and y axes.
Nice optional args, like plotting straight (horizontal or vertical) lines, saving plots, showing the plot,
adding optional legends, etc.
Saves png, svg and pdf to the Desktop automatically if save_plot=True.
Easiest use: plot(x, y, xlabel, ylabel)
Easy recommended use: plot(x, y, xlabel, ylabel, save_plot=True, title=title)
(See the hedged usage sketch right after this function.)
"""
if new_plot:
fig, axs = plt.subplots(nrows=1, ncols=1, sharex=True, tight_layout=True)
axs.set_xlabel(xlabel)
axs.set_ylabel(ylabel)
axs.set_title(title)
else:
axs = plt
plt.xlabel(xlabel)
plt.ylabel(ylabel)
axs.title(title)
axs.plot(x, y, marker=marker, label=label, lw=linewidth, color=color)
axs.grid(True) # adds nice grids instead of plot being white
if tight_layout:
plt.tight_layout() # automatically adjusts subplot params so that the subplot(s) fits in to the figure area.
# - optionals
if x_hline:  # draws a vertical line at a constant x value
axs.axvline(x=x_hline, color='g', linestyle='--', label=x_hline_label)
if y_hline:  # draws a horizontal line at a constant y value
axs.axhline(y=y_hline, color='r', linestyle='--', label=y_hline_label)
if label or y_hline_label or x_hline_label:
axs.legend() # LABEL = LEGEND. A legend is an area describing the elements of the graph.
if save_plot:
save_to_desktop(plot_filename)
if show:
plt.show()
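# Hedged usage sketch for plot() above: a toy loss curve with a horizontal target line.
# The data is synthetic and purely illustrative.
def _example_plot_usage():
    import numpy as np
    steps = np.arange(100)
    loss = np.exp(-steps / 30.0)
    plot(steps, loss, xlabel='step', ylabel='loss', title='toy loss curve',
         label='train loss', y_hline=0.1, y_hline_label='target', show=True)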
def plot_with_error_bands(x: np.ndarray, y: np.ndarray, yerr: np.ndarray,
xlabel: str, ylabel: str,
title: str,
curve_label: Optional[str] = None,
error_band_label: Optional[str] = None,
x_vals_as_symbols: Optional[list[str]] = None,
color: Optional[str] = None, ecolor: Optional[str] = None,
linewidth: float = 1.0,
style: Optional[str] = 'default',
capsize: float = 3.0,
alpha: float = 0.2,
ylim: Optional[tuple[float, float]] = None,
show: bool = False
):
"""
Plot custom error bands given x and y.
note:
- example values for color and ecolor:
color='tab:blue', ecolor='tab:blue'
- capsize is the length of the horizontal line for the error bar. Larger number makes it longer horizontally.
- alpha values greater than 0.2 make the error band fill color too dark. Really consider not changing it.
- sample values for curves and error_band labels:
curve_label: str = 'mean with error bars',
error_band_label: str = 'error band',
- use x_vals_as_symbols to have strings in the x-axis for each individual points. Warning, it might clutter the
x-axis so use just a few.
refs:
- for making the seaborn and matplot lib look the same see: https://stackoverflow.com/questions/54522709/my-seaborn-and-matplotlib-plots-look-the-same
"""
if style == 'default':
# use the standard matplotlib
plt.style.use("default")
elif style == 'seaborn' or style == 'sns':
# looks identical to seaborn
import seaborn as sns
sns.set()
elif style == 'seaborn-darkgrid':
# uses the default colours of matplot but with blue background of seaborn
plt.style.use("seaborn-darkgrid")
elif style == 'ggplot':
# other alternative to something that looks like seaborn
plt.style.use('ggplot')
# ax = plt.gca()
# fig = plt.gcf(
# fig, axs = plt.subplots(nrows=1, ncols=1, sharex=True, tight_layout=True)
# - if symbols in x axis instead of raw x value
if x_vals_as_symbols is not None:
# plt.xticks(x, [f'val{v}' for v in x]) to test
plt.xticks(x, x_vals_as_symbols)
# - plot bands
plt.errorbar(x=x, y=y, yerr=yerr, color=color, ecolor=ecolor,
capsize=capsize, linewidth=linewidth, label=curve_label)
plt.fill_between(x=x, y1=y - yerr, y2=y + yerr, alpha=alpha, label=error_band_label)
plt.grid(True)
if curve_label or error_band_label:
plt.legend()
if ylim is not None:
plt.ylim(*ylim)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if show:
plt.show()
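# Hedged usage sketch for plot_with_error_bands() above, with synthetic means and errors.
def _example_error_band_usage():
    x = np.arange(10)
    y = np.sqrt(x)
    yerr = 0.1 * np.ones_like(y)
    plot_with_error_bands(x, y, yerr,
                          xlabel='epoch', ylabel='accuracy', title='toy accuracy curve',
                          curve_label='mean', error_band_label='error band', show=True)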
def save_to_desktop(plot_name: str = 'plot'):
"""
Assuming you have not called show, saves the current figure to the local user's Desktop as a png, svg & pdf.
"""
root = Path('~/Desktop').expanduser()
plt.savefig(root / f'{plot_name}.png')
plt.savefig(root / f'{plot_name}.svg')
plt.savefig(root / f'{plot_name}.pdf')
def save_to_home(plot_name: str = 'plot'):
"""
Assuming you have not called show, saves the current figure to the local user's home directory as a png, svg & pdf.
"""
root = Path('~/').expanduser()
plt.savefig(root / f'{plot_name}.png')
plt.savefig(root / f'{plot_name}.svg')
plt.savefig(root / f'{plot_name}.pdf')
def save_to(root: Path, plot_name: str = 'plot'):
"""
Assuming there is a plot on display, saves it to the given root directory as a png, svg & pdf.
"""
root: Path = root.expanduser()
plt.savefig(root / f'{plot_name}.png')
plt.savefig(root / f'{plot_name}.svg')
plt.savefig(root / f'{plot_name}.pdf')
def draw_veritcal_line(at_x_value: float, start: float, stop: float, num: int = 100, linestyle: str = "--"):
"""
Draws a vertical line at a specific x value for a given range [start, stop].
Note:
- num tells us how dense to draw this line.
"""
plt.plot([at_x_value] * num, np.linspace(start, stop, num), linestyle=linestyle)
# - seaborn
LayerIdentifier = str
def _list_order_dict2data_frame(data: list[OrderedDict[LayerIdentifier, float]], metric: str):
"""
Converts [B, L] to pandas dataframe (just a table) with the row as:
{'layer_name': f'Layer{col}', 'metric': metric, 'sample_val': data[row, col]}
so it makes sure it has the metric repeated for all sample values basically in one single table.
"""
from pandas import DataFrame
# - for each data value in [B, L] create a table augmented with the name for that data (which associates all the
# values for one curve together in the table)
data_df: DataFrame = DataFrame()  # grows one row per (row, col) sample below
B: int = len(data)  # number of rows in [B, L], e.g. data.shape[0]
L: int = len(data[0])  # number of layers (columns) in [B, L], e.g. data.shape[1]
for row in range(B): # b
for col in range(L): # l
df_row = {'layer_name': f'Layer{col}', 'metric': metric, 'sample_val': data[row, col]}
# - make sure you do it like this so that the previous data frame is added to the new one and the assignment
# is there to not forget the growing data_frame (really a table)
data_df: DataFrame = data_df.append(df_row, ignore_index=True)
return data_df
def _list_metrics_full_data2data_frame(dist_metric2data: OrderedDict[str, list[OrderedDict[LayerIdentifier, float]]]):
"""
lst_data should be a list of experiment results for each metric. e.g.
dist_metric2data[metric_name] -> [B, L] matrix for the individual sample values for each layer (total B*L sample values).
usually you have 4 num_metrics [svcca, pwcca, lincka, opd].
Then the data frame effective has size [4, B, L] values but all flattened into a table where each row has a row value
as [row_arbitrary_name, metric, layer_name, val]
"""
import pandas as pd
from pandas import DataFrame
column_names = ["layer_name", "metric", "sample_val"]
data_df: DataFrame = pd.DataFrame(columns=column_names)
for metric, data in dist_metric2data.items():
# assert data is size [B, L]
# get [B, L] -> have the metric in the table
new_data_df: DataFrame = _list_order_dict2data_frame(data, metric)
# - append current data to growing data frame
data_df = pd.concat([data_df, new_data_df], ignore_index=True)
return data_df
def plot_seaborn_table_with_metric(dist_metric2data: OrderedDict[str, list[OrderedDict[LayerIdentifier, float]]]):
"""
The main idea of this function is that we have a collection of values in [B, L] e.g. B sim/dist values for each
layer L organized as a list[OrderedDict[str, float]].
But we have one for each metric type so we have another OrderDict[metric] -> [B, L] data.
But we need to append it all into one table and have one column for the metric and each [B, L] has that value
appended to it. Each metric will essentially be its own curve (with error bands).
Then the hue | |
import os
import re
import sys
import logging
import warnings
import idlib
from idlib.utils import log as _ilog
from augpathlib.utils import log as _alog
from pyontutils.utils import (makeSimpleLogger,
python_identifier, # FIXME update imports
TZLOCAL,
utcnowtz,
isoformat,
isoformat_safe,
timeformat_friendly)
from . import exceptions as exc
from .config import auth
_find_command = 'gfind' if sys.platform == 'darwin' else 'find'
log = makeSimpleLogger('sparcur')
logd = log.getChild('data')
loge = log.getChild('export')
# set augpathlib log format to pyontutils (also sets all child logs)
_alog.removeHandler(_alog.handlers[0])
_alog.addHandler(log.handlers[0])
# idlib logs TODO move to pyontutils probably?
_ilog.removeHandler(_alog.handlers[0])
_ilog.addHandler(log.handlers[0])
# silence known warnings on pypy3
if hasattr(sys, 'pypy_version_info'):
warnings.filterwarnings('ignore', module='.+protobuf.+')
__type_registry = {None: None}
def register_type(cls, type_name):
if type_name in __type_registry:
if __type_registry[type_name] is cls:
# better to do this check here than to force
# all callers to check for themselves which
# can fail if two separate systems try to
# register the same type
return
raise ValueError(f'Cannot map {cls} to {type_name}. '
'Type already present! '
f'{type_name} -> {__type_registry[type_name]}')
__type_registry[type_name] = cls
def register_all_types():
# as a side effect this registers idlib streams and OntTerm
# sigh doing anything in the top level of python :/
import sparcur.core
import sparcur.paths # also a top level registration
# this is not done at top level because it is quite slow
from pysercomb.pyr import units as pyru
[register_type(c, c.tag)
for c in (pyru._Quant, pyru.Range, pyru.Approximately)]
class IdentityJsonType:
""" use to register types that should not be recursed upon e.g.
because they contain external use of type that does not align
with our usage """
@classmethod
def fromJson(cls, blob):
return blob
def fromJson(blob):
def nitr(value):
try:
return value not in __type_registry
except TypeError as e:
log.critical(e)
return False
if isinstance(blob, dict):
if 'type' in blob:
t = blob['type']
if t == 'identifier':
type_name = blob['system']
elif t in ('quantity', 'range'):
type_name = t
elif nitr(t):
breakpoint()
raise NotImplementedError(f'TODO fromJson for type {t} '
f'currently not implemented\n{blob}')
else:
type_name = t
cls = __type_registry[type_name]
if cls is not None:
return cls.fromJson(blob)
return {k: v
if k == 'errors' or k.endswith('_errors') else
fromJson(v)
for k, v in blob.items()}
elif isinstance(blob, list):
return [fromJson(_) for _ in blob]
else:
return blob
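# Hedged sketch of how register_type()/fromJson() above fit together. _Point is a made-up example
# class, not part of sparcur; real callers register idlib/pysercomb/OntTerm types instead.
class _Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def fromJson(cls, blob):
        return cls(blob['x'], blob['y'])


def _example_fromJson_roundtrip():
    register_type(_Point, 'point')  # map the json 'type' tag to the class
    blob = {'data': [{'type': 'point', 'x': 1, 'y': 2}]}
    ir = fromJson(blob)
    assert isinstance(ir['data'][0], _Point)
    return ir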
def path_irs(*paths_or_strings):
"""Given one or more paths pointing to sparcur export
json yield the python internal representation."""
# TODO support for urls
import json
register_all_types()
for path_or_string in paths_or_strings:
with open(path_or_string) as f:
blob = json.load(f)
yield fromJson(blob)
def path_ir(path_or_string):
"""Given a path or string return the sparcur python ir."""
return next(path_irs(path_or_string))
def render_manifest(rows):
# FIXME checksum cypher
return [[filename.as_posix(),
isoformat(timestamp),
desc,
ft,
at,
checksum.hex() if checksum else checksum,]
for filename, timestamp, desc, ft, at, checksum in rows]
def write_manifests(*args, parents=None, parents_rows=None, suffix='.csv',
include_directories=False,):
header = ('filename', 'timestamp', 'description', # FIXME no hardcode
'file type', 'additional types', 'checksum')
if parents is None and parents_rows is None:
raise TypeError('one of parents or parents_rows is required')
elif parents and parents_rows:
raise TypeError('at most one of parents or parents_rows is allowed')
if parents_rows:
parents = [p for p, r in parents_rows]
existing = []
for parent in parents:
manifest = parent / f'manifest{suffix}'
if manifest.exists():
existing.append(manifest)
if existing: # TODO overwrite etc.
# FIXME TODO relative to some reference point
msg = f'Existing manifest files detected not writing!\n{existing}'
raise ValueError(msg)
if parents_rows is None:
parents_rows = [(path, path.generate_manifest())
for path in parents]
manifests_rendered = []
if suffix == '.csv': # FIXME deal with different suffixes
import csv
paths_rendered = [(path, render_manifest(manifest))
for path, manifest in parents_rows]
for path, rendered in paths_rendered:
manifest = path / f'manifest{suffix}'
manifests_rendered.append((manifest, rendered))
with open(manifest, 'wt') as f:
csv.writer(f).writerows([header] + rendered)
else:
raise NotImplementedError(f"Don't know how to export {suffix}")
return manifests_rendered
def expand_label_curie(rows_of_terms):
return [[value for term in rot for value in
(term.label if term is not None else '',
term.curie if term is not None else '')]
for rot in rows_of_terms]
def levenshteinDistance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1],
distances[i1 + 1],
distances_[-1])))
distances = distances_
return distances[-1]
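# Quick sanity example for levenshteinDistance() above: the classic "kitten" -> "sitting"
# pair needs three single-character edits (k->s, e->i, insert g).
def _example_levenshtein():
    assert levenshteinDistance('kitten', 'sitting') == 3
    assert levenshteinDistance('', 'abc') == 3
    assert levenshteinDistance('same', 'same') == 0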
class GetTimeNow:
def __init__(self):
self._start_time = utcnowtz()
self._start_local_tz = TZLOCAL() # usually PST PDT
@property
def _start_time_local(self):
return self._start_time.astimezone(self._start_local_tz)
@property
def START_TIMESTAMP(self):
return isoformat(self._start_time)
@property
def START_TIMESTAMP_SAFE(self):
return isoformat_safe(self._start_time)
@property
def START_TIMESTAMP_FRIENDLY(self):
return timeformat_friendly(self._start_time)
@property
def START_TIMESTAMP_LOCAL(self):
return isoformat(self._start_time_local)
@property
def START_TIMESTAMP_LOCAL_SAFE(self):
return isoformat_safe(self._start_time_local)
@property
def START_TIMESTAMP_LOCAL_FRIENDLY(self):
return timeformat_friendly(self._start_time_local)
class SimpleFileHandler:
_FIRST = object()
def __init__(self, log_file_path, *logs, mimic=_FIRST):
self.log_file_handler = logging.FileHandler(log_file_path.as_posix())
if mimic is self._FIRST and logs:
self.mimic(logs[0])
elif mimic:
self.mimic(mimic)
for log in logs:
self(log)
def __call__(self, *logs_to_handle):
for log in logs_to_handle:
log.addHandler(self.log_file_handler)
def mimic(self, log):
self.log_file_handler.setFormatter(log.handlers[0].formatter)
def silence_loggers(*logs):
for log in logs:
parent = log
while parent:
[parent.removeHandler(h) for h in parent.handlers]
parent = parent.parent
def bind_file_handler(log_file):
# FIXME the this does not work with joblib at the moment
from idlib.utils import log as idlog
from protcur.core import log as prlog
from orthauth.utils import log as oalog
from ontquery.utils import log as oqlog
from augpathlib.utils import log as alog
from pyontutils.utils import log as pylog
sfh = SimpleFileHandler(log_file, log)
sfh(alog, idlog, oalog, oqlog, prlog, pylog)
class _log:
""" logging prevents nice ipython recurions error printing
so rename this class to log when you need fake logging """
@staticmethod
def debug(nothing): pass
@staticmethod
def info(nothing): pass
@staticmethod
def warning(nothing): print(nothing)
@staticmethod
def error(nothing): pass
@staticmethod
def critical(nothing): pass
want_prefixes = ('TEMP', 'FMA', 'UBERON', 'PATO', 'NCBITaxon', 'ilxtr',
'sparc', 'BIRNLEX', 'tech', 'unit', 'ILX', 'lex',)
def is_list_or_tuple(obj):
return isinstance(obj, list) or isinstance(obj, tuple)
def symlink_latest(dump_path, path, relative=True):
""" relative to allow moves of the containing folder
without breaking links """
if relative:
dump_path = dump_path.relative_path_from(path)
if path.exists():
if not path.is_symlink():
raise TypeError(f'Why is {path.name} not a symlink? '
f'{path!r}')
path.unlink()
path.symlink_to(dump_path)
def _transitive_(path, command):
with path:
with os.popen(command) as p:
string = p.read()
path_strings = string.split('\n') # XXX posix path names can contain newlines
# XXXXXXXXXXXXXXXXXXX REMINDER THAT THIS IS NOT SORTED
# https://doi.org/10.1021/acs.orglett.9b03216
paths = [path / s for s in path_strings if s][1:] # leave out the parent folder itself
return paths
def transitive_paths(path, exclude_patterns=tuple()):
"""Fast list of all child directories using unix find."""
if sys.platform == 'win32':
# XXX assumes that rchildren already implements exclude patterns
return list(path.rchildren)
hrm = ' '.join(['-not -path ' + repr(pat) for pat in exclude_patterns])
if hrm:
hrm = ' ' + hrm
command = f"""{_find_command} -not -path '.operations*'{hrm}"""
# TODO failover to builtin rglob
return _transitive_(path, command)
def transitive_dirs(path):
"""Fast list of all child directories using unix find."""
if sys.platform == 'win32': # no findutils
gen = os.walk(path)
next(gen) # drop path itself to avoid drp == Path('.')
return [path / t[0] for t in gen]
command = f"""{_find_command} -type d"""
# TODO failover to builtin rglob + filter
return _transitive_(path, command)
def unicode_truncate(string, length):
""" Truncate unicode string to at most maximum length.
If truncation splits a multibyte character then ignore
the final truncated bytes (see the example after this function). """
return string.encode()[:length].decode(errors='ignore')
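# Example for unicode_truncate() above: 'é' is two bytes in UTF-8, so truncating 'ééé' to
# three bytes keeps one full character and silently drops the split one.
def _example_unicode_truncate():
    assert unicode_truncate('ééé', 3) == 'é'
    assert unicode_truncate('abc', 2) == 'ab'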
class ApiWrapper:
""" Sometimes you just need one more level of indirection!
Abstract base class to wrap Blackfynn and Pennsieve apis.
"""
_id_class = None
_api_class = None
_sec_remote = None
_dp_class = None
@classmethod
def _get_connection(cls, project_id):#, retry=10):
try:
return cls._api_class(
api_token=auth.user_config.secrets(
cls._sec_remote, project_id, 'key'),
api_secret=auth.user_config.secrets(
cls._sec_remote, project_id, 'secret'))
except KeyError as e:
msg = (f'need record in secrets for {cls._sec_remote} '
f'organization {project_id}')
raise exc.MissingSecretError(msg) from e
#except Exception as e: # was absent dsn caching + rate limits
#from time import sleep
#if 0 < retry:
# exponential falloff with the final wait being 10 seconds
#sleep(10 ** (1 / retry))
#cls._get_connection(project_id, retry=retry - 1)
#else:
#raise e
def __init__(self, project_id, anchor=None):
# no changing local storage prefix in the middle of things
# if you want to do that create a new class
if isinstance(project_id, self._id_class):
# FIXME make the _id_class version the internal default
project_id = project_id.id
import requests # SIGH
self._requests = requests
self.bf = self._get_connection(project_id)
self.organization = self.bf.context
self.project_name = self.bf.context.name
self.root = self.organization.id
self._project_id = project_id # keep it around | |
np.uint64(2)
# feature data
fd = m.create_group('FeatureData')
fd.attrs['AttributeMatrixType'] = np.uint32(7)
fd.attrs['TupleDimensions'] = np.uint64(self.grains.nrows)
Euler = np.array([Orientation.from_rodrigues(g['orientation'])
for g in self.grains], dtype=np.float32)
avg_euler = fd.create_dataset('AvgEulerAngles', data=Euler)
avg_euler.attrs['ComponentDimensions'] = np.uint64(3)
avg_euler.attrs['DataArrayVersion'] = np.int32(2)
avg_euler.attrs['ObjectType'] = np.string_('DataArray<float>')
avg_euler.attrs['Tuple Axis Dimensions'] = np.string_('x=%d' %
self.grains.nrows)
avg_euler.attrs['TupleDimensions'] = np.uint64(self.grains.nrows)
# geometry
geom = m.create_group('_SIMPL_GEOMETRY')
geom.attrs['GeometryType'] = np.uint32(999)
geom.attrs['GeometryTypeName'] = np.string_('UnkownGeometry')
# create the data container bundles group
f.create_group('DataContainerBundles')
f.close()
@staticmethod
def from_dream3d(file_path, main_key='DataContainers',
data_container='DataContainer', grain_data='FeatureData',
grain_orientations='AvgEulerAngles',
orientation_type='euler', grain_centroid='Centroids'):
"""Read a microstructure from a hdf5 file.
:param str file_path: the path to the hdf5 file to read.
:param str main_key: the string describing the root key.
:param str data_container: the string describing the data container
group in the hdf5 file.
:param str grain_data: the string describing the grain data group in the
hdf5 file.
:param str grain_orientations: the string describing the average grain
orientations in the hdf5 file.
:param str orientation_type: the string describing the descriptor used
for orientation data.
:param str grain_centroid: the string describing the grain centroid in
the hdf5 file.
:return: a `Microstructure` instance created from the hdf5 file.
"""
head, tail = os.path.split(file_path)
micro = Microstructure(name=tail, file_path=head, overwrite_hdf5=True)
with h5py.File(file_path, 'r') as f:
grain_data_path = '%s/%s/%s' % (main_key, data_container, grain_data)
orientations = f[grain_data_path][grain_orientations].value
if grain_centroid:
centroids = f[grain_data_path][grain_centroid].value
offset = 0
if len(centroids) < len(orientations):
offset = 1 # if grain 0 has not a centroid
grain = micro.grains.row
for i in range(len(orientations)):
grain['idnumber'] = i
if orientations[i, 0] == 0. and orientations[i, 1] == 0. and \
orientations[i, 2] == 0.:
# skip grain 0 which is always (0., 0., 0.)
print('skipping (0., 0., 0.)')
continue
if orientation_type == 'euler':
grain['orientation'] = Orientation.from_euler(
orientations[i] * 180 / np.pi).rod
elif orientation_type == 'rodrigues':
grain['orientation'] = Orientation.from_rodrigues(
orientations[i]).rod
if grain_centroid:
grain['center'] = centroids[i - offset]
grain.append()
micro.grains.flush()
return micro
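# Hedged usage sketch for from_dream3d() above; the file name and group names are placeholders
# that must match the layout of your own DREAM.3D export, not confirmed defaults.
@staticmethod
def _example_from_dream3d():
    micro = Microstructure.from_dream3d(
        'synthetic.dream3d',                           # hypothetical DREAM.3D output file
        data_container='SyntheticVolumeDataContainer',
        grain_data='CellFeatureData',
        grain_orientations='AvgEulerAngles',
        orientation_type='euler',
        grain_centroid='Centroids')
    print('read %d grains' % micro.grains.nrows)
    return micro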
@staticmethod
def copy_sample(src_micro_file, dst_micro_file, overwrite=False,
get_object=False, dst_name=None, autodelete=False):
""" Initiate a new SampleData object and files from existing one"""
SampleData.copy_sample(src_micro_file, dst_micro_file, overwrite,
new_sample_name=dst_name)
if get_object:
return Microstructure(filename=dst_micro_file,
autodelete=autodelete)
else:
return
@staticmethod
def from_neper(neper_file_path):
"""Create a microstructure from a neper tesselation.
Neper is an open source program to generate polycristalline
microstructure using voronoi tesselations. It is available at
https://neper.info
:param str neper_file_path: the path to the tesselation file generated
by Neper.
:return: a pymicro `Microstructure` instance.
"""
neper_file = neper_file_path.split(os.sep)[-1]
neper_dir = os.path.dirname(neper_file_path)
print('creating microstructure from Neper tessellation %s' % neper_file)
name, ext = os.path.splitext(neper_file)
print(name, ext)
filename = os.path.join(neper_dir, name)
assert ext == '.tesr' # assuming raster tessellation
micro = Microstructure(name=name, filename=filename, overwrite_hdf5=True)
with open(neper_file_path, 'r', encoding='latin-1') as f:
line = f.readline() # ***tesr
# look for **general
while True:
line = f.readline().strip() # get rid of unnecessary spaces
if line.startswith('**general'):
break
dim = f.readline().strip()
print(dim)
dims = np.array(f.readline().split()).astype(int).tolist()
print(dims)
voxel_size = np.array(f.readline().split()).astype(float).tolist()
print(voxel_size)
# look for **cell
while True:
line = f.readline().strip()
if line.startswith('**cell'):
break
n = int(f.readline().strip())
print('microstructure contains %d grains' % n)
f.readline() # *id
grain_ids = []
# look for *ori
while True:
line = f.readline().strip()
if line.startswith('*ori'):
break
else:
grain_ids.extend(np.array(line.split()).astype(int).tolist())
print('grain ids are:', grain_ids)
oridescriptor = f.readline().strip() # must be euler-bunge:passive
if oridescriptor != 'euler-bunge:passive':
print('Wrong orientation descriptor: %s, must be '
'euler-bunge:passive' % oridescriptor)
grain = micro.grains.row
for i in range(n):
euler_angles = np.array(f.readline().split()).astype(float).tolist()
print('adding grain %d' % grain_ids[i])
grain['idnumber'] = grain_ids[i]
grain['orientation'] = Orientation.from_euler(euler_angles).rod
grain.append()
micro.grains.flush()
# look for **data and handle *group if present
phase_ids = None
while True:
line = f.readline().strip()
if line.startswith('*group'):
print('multi phase sample')
phase_ids = []
while True:
line = f.readline().strip()
if line.startswith('**data'):
break
else:
phase_ids.extend(np.array(line.split()).astype(int).tolist())
print('phase ids are:', phase_ids)
if line.startswith('**data'):
break
print(f.tell())
print('reading data from byte %d' % f.tell())
data = np.fromfile(f, dtype=np.uint16)[:-4] # leave out the last 4 values
print(data.shape)
assert np.prod(dims) == data.shape[0]
micro.set_grain_map(data.reshape(dims[::-1]).transpose(2, 1, 0),
voxel_size=voxel_size[0]) # swap X/Z axes
print('updating grain geometry')
micro.recompute_grain_bounding_boxes()
micro.recompute_grain_centers()
micro.recompute_grain_volumes()
# if necessary set the phase_map
if phase_ids:
grain_map = micro.get_grain_map(as_numpy=True)
phase_map = np.zeros_like(grain_map)
for grain_id, phase_id in zip(grain_ids, phase_ids):
# ignore phase id == 1 as this corresponds to phase_map == 0
if phase_id > 1:
phase_map[grain_map == grain_id] = phase_id - 1
micro.set_phase_map(phase_map)
print('done')
return micro
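# Hedged usage sketch (illustrative only): reading a raster tessellation
# produced by Neper, e.g. `neper -T -n 100 -format tesr`; the file name is an
# assumption.
#
#   micro = Microstructure.from_neper('n100-id1.tesr')
#   print(micro.get_grain_map(as_numpy=True).shape)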
@staticmethod
def from_labdct(labdct_file, data_dir='.', include_IPF_map=False,
include_rodrigues_map=False):
"""Create a microstructure from a DCT reconstruction.
:param str labdct_file: the name of the file containing the labDCT data.
:param str data_dir: the path to the folder containing the HDF5
reconstruction file.
:param bool include_IPF_map: if True, the IPF maps will be included
in the microstructure fields.
:param bool include_rodrigues_map: if True, the rodrigues map will be
included in the microstructure fields.
:return: a `Microstructure` instance created from the labDCT
reconstruction file.
"""
file_path = os.path.join(data_dir, labdct_file)
print('creating microstructure for labDCT scan %s' % file_path)
name, ext = os.path.splitext(labdct_file)
# get the phase data
with h5py.File(file_path, 'r') as f:
#TODO handle multiple phases
phase01 = f['PhaseInfo']['Phase01']
phase_name = phase01['Name'][0].decode('utf-8')
parameters = phase01['UnitCell'][()] # length unit is angstrom
a, b, c = parameters[:3] / 10 # use nm unit
alpha, beta, gamma = parameters[3:]
print(parameters)
sym = Lattice.guess_symmetry_from_parameters(a, b, c, alpha, beta, gamma)
print('found %s symmetry' % sym)
lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=sym)
phase = CrystallinePhase(phase_id=1, name=phase_name, lattice=lattice)
# create the microstructure with the phase infos
micro = Microstructure(name=name, path=data_dir, overwrite_hdf5=True, phase=phase)
# load cell data
with h5py.File(file_path, 'r') as f:
spacing = f['LabDCT']['Spacing'][0]
rodrigues_map = f['LabDCT']['Data']['Rodrigues'][()].transpose(2, 1, 0, 3)
grain_map = f['LabDCT']['Data']['GrainId'][()].transpose(2, 1, 0)
print('adding cell data with shape {}'.format(grain_map.shape))
micro.set_grain_map(grain_map, voxel_size=spacing)
mask = f['LabDCT']['Data']['Mask'][()].transpose(2, 1, 0)
micro.set_mask(mask, voxel_size=spacing)
phase_map = f['LabDCT']['Data']['PhaseId'][()].transpose(2, 1, 0)
micro.set_phase_map(phase_map, voxel_size=spacing)
if include_IPF_map:
IPF001_map = f['LabDCT']['Data']['IPF001'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF001_map',
array=IPF001_map)
IPF010_map = f['LabDCT']['Data']['IPF010'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF010_map',
array=IPF010_map)
IPF100_map = f['LabDCT']['Data']['IPF100'][()].transpose(2, 1, 0, 3)
micro.add_field(gridname='CellData', fieldname='IPF100_map',
array=IPF100_map)
if include_rodrigues_map:
micro.add_field(gridname='CellData', fieldname='rodrigues_map',
array=rodrigues_map)
# create grain data table infos
grain_ids = np.unique(grain_map)
print(grain_ids)
micro.build_grain_table_from_grain_map()
# now get each grain orientation from the rodrigues map
for i, g in enumerate(micro.grains):
gid = g['idnumber']
progress = 100 * (1 + i) / len(micro.grains)
print('adding grains: {0:.2f} %'.format(progress), end='\r')
x, y, z = np.where(micro.get_grain_map() == gid)
orientation = rodrigues_map[x[0], y[0], z[0]]
# assign orientation to this grain
g['orientation'] = orientation
g.update()
micro.grains.flush()
return micro
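# Hedged usage sketch (file and directory names are hypothetical): read a
# labDCT reconstruction and keep the IPF colour maps as extra CellData fields.
#
#   micro = Microstructure.from_labdct('scan_3Dvolume.h5',
#                                      data_dir='/data/labdct',
#                                      include_IPF_map=True)
#   print('%d grains' % micro.grains.nrows)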
@staticmethod
def from_dct(data_dir='.', grain_file='index.mat',
vol_file='phase_01_vol.mat', mask_file='volume_mask.mat',
use_dct_path=True, verbose=True):
"""Create a microstructure from a DCT reconstruction.
DCT reconstructions are stored in several files. The indexed grain
information is stored in a MATLAB file in the '4_grains/phase_01'
folder. The reconstructed volume file (labeled image) is stored
in the '5_reconstruction' folder as an HDF5 file, possibly
alongside a mask file coming from the absorption reconstruction.
:param str data_dir: the path to the folder containing the
reconstruction data.
:param str grain_file: the name of the file containing grains info.
:param str vol_file: the name of the volume file.
:param str mask_file: the name of the mask file.
:param bool use_dct_path: if True, the grain_file should be located in
4_grains/phase_01 folder and the vol_file and mask_file in the
5_reconstruction folder.
:param bool verbose: activate verbose mode.
:return: a `Microstructure` instance created from the DCT reconstruction.
"""
if data_dir == '.':
data_dir = os.getcwd()
if data_dir.endswith(os.sep):
data_dir = data_dir[:-1]
scan = data_dir.split(os.sep)[-1]
print('creating microstructure for DCT scan %s' % scan)
filename = os.path.join(data_dir,scan)
micro = Microstructure(filename=filename, overwrite_hdf5=True)
micro.data_dir = data_dir
if use_dct_path:
index_path = os.path.join(data_dir, '4_grains', 'phase_01',
grain_file)
else:
index_path = os.path.join(data_dir, grain_file)
print(index_path)
if not os.path.exists(index_path):
raise ValueError('%s not found, please specify a valid path to the'
' grain file.' % index_path)
from scipy.io import loadmat
index = loadmat(index_path)
#TODO fetch pixel size from detgeo instead
voxel_size = index['cryst'][0][0][25][0][0]
# grab the crystal lattice
lattice_params = index['cryst'][0][0][3][0]
sym = Symmetry.from_string(index['cryst'][0][0][7][0])
print('creating crystal lattice {} ({}) with parameters {}'
''.format(index['cryst'][0][0][0][0], sym, lattice_params))
lattice_params[:3] /= 10 # angstrom to nm
lattice = Lattice.from_parameters(*lattice_params, symmetry=sym)
micro.set_lattice(lattice)
# add all grains to the microstructure
grain = micro.grains.row
for i in range(len(index['grain'][0])):
grain['idnumber'] = index['grain'][0][i][0][0][0][0][0]
grain['orientation'] = index['grain'][0][i][0][0][3][0]
grain['center'] = index['grain'][0][i][0][0][15][0]
grain.append()
micro.grains.flush()
# we'll leak memory (hold on to objects in the cache that could have been
# garbage collected).
tracer.log("caller")
self.object_engine.run_sql("SET LOCAL synchronous_commit TO off")
self._release_objects(required_objects)
tracer.log("release_objects")
logging.debug("Releasing %s", pluralise("object", len(required_objects)))
if table:
logging.debug(
"Timing stats for %s/%s/%s/%s: \n%s",
table.repository.namespace,
table.repository.repository,
table.image.image_hash,
table.table_name,
tracer,
)
self.object_engine.commit()
# Release the metadata tables as well
self.metadata_engine.commit()
return CallbackList([_f])
def make_objects_external(
self, objects: List[str], handler: str, handler_params: Dict[Any, Any]
) -> None:
"""
Uploads local objects to an external location and marks them as being cached locally (thus making it possible
to evict or swap them out).
:param objects: Object IDs to upload. Will do nothing for objects that already exist externally.
:param handler: Object handler
:param handler_params: Extra handler parameters
"""
# Get objects that haven't been uploaded
uploaded_objects = [o[0] for o in self.get_external_object_locations(objects)]
new_objects = [o for o in objects if o not in uploaded_objects]
logging.debug(
"%s of %d haven't been uploaded yet: %r",
pluralise("object", len(new_objects)),
len(objects),
new_objects,
)
if not new_objects:
return
# Similar to claim_objects, make sure we don't deadlock with other uploaders
# by keeping a consistent order.
new_objects = sorted(new_objects)
# Insert the objects into the cache status table (marking them as not ready)
now = dt.utcnow()
self.object_engine.run_sql_batch(
insert("object_cache_status", ("object_id", "ready", "refcount", "last_used"))
+ SQL("ON CONFLICT (object_id) DO UPDATE SET ready = 'f'"),
[(object_id, False, 1, now) for object_id in new_objects],
)
# Grab the objects that we're supposed to be uploading.
claimed_objects = self.object_engine.run_sql(
select("object_cache_status", "object_id", "ready = 'f' FOR UPDATE"),
return_shape=ResultShape.MANY_ONE,
)
new_objects = [o for o in new_objects if o in claimed_objects]
# Perform the actual upload
external_handler = get_external_object_handler(handler, handler_params)
partial_failure: Optional[IncompleteObjectUploadError] = None
try:
with switch_engine(self.object_engine):
successful = {
o: u
for o, u in external_handler.upload_objects(new_objects, self.metadata_engine)
}
except IncompleteObjectUploadError as e:
partial_failure = e
successful = {o: u for o, u in zip(e.successful_objects, e.successful_object_urls)}
locations = [(o, u, handler) for o, u in successful.items()]
self.register_object_locations(locations)
# Increase the cache occupancy since the objects can now be evicted.
self._increase_cache_occupancy(list(successful.keys()))
# Mark the objects as ready and decrease their refcounts.
self._set_ready_flags(list(successful.keys()), True)
self._release_objects(list(successful.keys()))
self.object_engine.commit()
self.metadata_engine.commit()
if partial_failure:
if partial_failure.reason:
raise partial_failure.reason
else:
raise ObjectCacheError("Some objects failed to upload!")
# Perform eviction in case we've reached the capacity of the cache
excess = self.get_cache_occupancy() - self.cache_size
if excess > 0:
self.run_eviction(keep_objects=[], required_space=excess)
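# Hedged usage sketch (not from the original source): pushing two local objects
# to external storage so they become evictable. The object IDs, handler name
# and parameters are assumptions; use whatever handler your deployment
# actually registers.
#
#   object_manager.make_objects_external(
#       objects=["o1234abcd", "o5678efgh"],
#       handler="S3",
#       handler_params={},
#   )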
def _prepare_fetch_list(self, required_objects: List[str]) -> List[str]:
"""
Calculates the missing objects and ensures there's enough space in the cache
to download them.
:param required_objects: Iterable of object IDs that are required to be on the engine.
:return: Set of objects to fetch
"""
to_fetch: List[str] = self.object_engine.run_sql(
select("object_cache_status", "object_id", "ready = 'f'"),
return_shape=ResultShape.MANY_ONE,
)
if to_fetch:
# If we need to download anything, take out an exclusive lock on the cache since we might
# need to run eviction and don't want multiple managers trying to download the same things.
# This used to be more granular (allow multiple managers downloading objects) but was resulting
# in fun concurrency bugs and deadlocks that I don't have the willpower to investigate further
# right now (e.g. two managers trying to download the same objects at the same time after one of them
# runs cache eviction and releases some locks -- or two managers trying to free the same amount
# of space in the cache for the same set of objects).
# Since we already hold a row-level lock on some objects in the cache, we have to release it first and
# lock the full table then -- but this means someone else might start downloading the objects that
# we claimed. So, once we acquire the lock, we recalculate the fetch list again to see what
# we're supposed to be fetching.
self.object_engine.commit()
self.object_engine.lock_table(SPLITGRAPH_META_SCHEMA, "object_cache_status")
to_fetch = self.object_engine.run_sql(
select("object_cache_status", "object_id", "ready = 'f'"),
return_shape=ResultShape.MANY_ONE,
)
# If someone else downloaded all the objects we need, there's no point in holding the lock.
# This is tricky to test with a single process.
if not to_fetch: # pragma: no cover
self.object_engine.commit()
return to_fetch
required_space = sum(o.size for o in self.get_object_meta(list(to_fetch)).values())
current_occupied = self.get_cache_occupancy()
logging.info(
"Need to download %s (%s), cache occupancy: %s/%s",
pluralise("object", len(required_objects)),
pretty_size(required_space),
pretty_size(current_occupied),
pretty_size(self.cache_size),
)
# If the total cache size isn't large enough, there's nothing we can do without cooperating with the
# caller and seeing if they can use the objects one-by-one.
if required_space > self.cache_size:
raise ObjectCacheError(
"Not enough space in the cache to download the required objects!"
)
if required_space > self.cache_size - current_occupied:
to_free = required_space + current_occupied - self.cache_size
logging.info("Need to free %s", pretty_size(to_free))
self.run_eviction(required_objects, to_free)
self.object_engine.commit()
# Finally, after we're done with eviction, relock the objects that we're supposed to be downloading.
to_fetch = self.object_engine.run_sql(
select("object_cache_status", "object_id", "ready = 'f' FOR UPDATE"),
return_shape=ResultShape.MANY_ONE,
)
return to_fetch
def _claim_objects(self, objects: List[str]) -> None:
"""Increases refcounts and bumps the last used timestamp to now for cached objects.
For objects that aren't in the cache, checks that they don't already exist locally and then
adds them to the cache status table, marking them with ready=False
(which must be set to True by the end of the operation)."""
if not objects:
return
now = dt.utcnow()
# Objects that were created locally aren't supposed to be claimed here or have an entry in the cache.
# So, we first try to update cache entries to bump their refcount, see which ones we updated,
# subtract objects that we have locally and insert the remaining entries as new cache entries.
claimed = self.object_engine.run_sql(
SQL( # nosec
"UPDATE {}.object_cache_status SET refcount = refcount + 1, "
"last_used = %s WHERE object_id IN ("
).format(Identifier(SPLITGRAPH_META_SCHEMA))
+ SQL(",".join(itertools.repeat("%s", len(objects))))
+ SQL(") RETURNING object_id"),
[now] + objects, # type: ignore
return_shape=ResultShape.MANY_ONE,
)
claimed = claimed or []
remaining = set(objects).difference(set(claimed))
remaining = remaining.difference(set(self.get_downloaded_objects(limit_to=list(remaining))))
# Since we send multiple queries, each claiming a single remaining object, we can deadlock here
# with another object manager instance. Hence, we sort the list of objects so that we claim them
# in a consistent order between all instances.
remaining = sorted(remaining)
# Remaining: objects that are new to the cache and that we'll need to download. However, between us
# running the first query and now, somebody else might have started downloading them. Hence, when
# we try to insert them, we'll be blocked until the other engine finishes its download and commits
# the transaction -- then get an integrity error. So here, we do an update on conflict (again).
self.object_engine.run_sql_batch(
insert("object_cache_status", ("object_id", "ready", "refcount", "last_used"))
+ SQL(
"ON CONFLICT (object_id) DO UPDATE SET refcount = EXCLUDED.refcount + 1, last_used = %s"
),
[(object_id, False, 1, now, now) for object_id in remaining],
)
def _set_ready_flags(self, objects: List[str], is_ready: bool = True) -> None:
if objects:
self.object_engine.run_sql(
SQL( # nosec
"UPDATE {0}.object_cache_status SET ready = %s WHERE object_id IN ("
+ ",".join(itertools.repeat("%s", len(objects)))
+ ")"
).format(Identifier(SPLITGRAPH_META_SCHEMA)),
[is_ready] + list(objects), # type: ignore
)
def _release_objects(self, objects: List[str]) -> None:
"""Decreases objects' refcounts."""
if objects:
self.object_engine.run_sql(
SQL( # nosec
"UPDATE {}.{} SET refcount = refcount - 1 WHERE object_id IN ("
+ ",".join(itertools.repeat("%s", len(objects)))
+ ")"
).format(Identifier(SPLITGRAPH_META_SCHEMA), Identifier("object_cache_status")),
objects,
)
def _increase_cache_occupancy(self, objects: List[str]) -> None:
"""Increase the cache occupancy by objects' total size."""
if not objects:
return
total_size = sum(o.size for o in self.get_object_meta(objects).values())
self.object_engine.run_sql(
SQL("UPDATE {}.object_cache_occupancy SET total_size = total_size + %s").format(
Identifier(SPLITGRAPH_META_SCHEMA)
),
(total_size,),
)
def _decrease_cache_occupancy(self, size_freed: int) -> None:
"""Decrease the cache occupancy by a given size."""
self.object_engine.run_sql(
SQL("UPDATE {}.object_cache_occupancy SET total_size = total_size - %s").format(
Identifier(SPLITGRAPH_META_SCHEMA)
),
(size_freed,),
)
def run_eviction(self, keep_objects: List[str], required_space: Optional[int] = None) -> None:
"""
Delete enough objects with zero reference count (only those, since we guarantee that whilst refcount is >0,
the object stays alive) to free at least `required_space` in the cache.
:param
corrector.Execute(inputImage, maskImage)
corrected_img = sitk.GetArrayFromImage(corrected_itk_img)
ax[ind, 1].imshow(corrected_img[largest, :, :], cmap='gray')
if not no_masks:
for contour in pancreas_contours:
ax[ind, 1].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 1].set_title('{} N4BiasFieldCorrected'.format(mod_name))
ax[ind, 1].axis('off')
print('\tPerforming CurvatureAnisotropicFilter on {} Image.'.format(mod_name))
filtered_itk_img = sitk.CurvatureAnisotropicDiffusion(corrected_itk_img, timeStep=0.015)
filtered_img = sitk.GetArrayFromImage(filtered_itk_img)
ax[ind, 2].imshow(filtered_img[largest, :, :], cmap='gray')
if not no_masks:
for contour in pancreas_contours:
ax[ind, 2].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 2].set_title('{} CurvatureAnisotropicFiltered'.format(mod_name))
ax[ind, 2].axis('off')
# # Nyul and Udupa histogram normalization routine with a given set of learned landmarks
# print('\tPerforming Nyul and Udupa histogram normalization on {} Images.'.format(mod_name))
# if np.array_equal(standard_scale[ind], np.zeros(len(percs))):
# hm_scale(root_path, mod_dirs, exp_name, index=ind, no_masks=True)
#
# mask = sitk.GetArrayFromImage(maskImage)
# masked = filtered_img[mask > 0]
# landmarks = np.percentile(masked, percs)
# f = interp1d(landmarks, standard_scale[ind], fill_value='extrapolate')
# normed_img = f(filtered_img)
out_img = np.rollaxis(filtered_img, 0, 3)
out_img = out_img.astype(np.float32)
# top_ninety = np.percentile(out_img, 99)
# bottom_ten = np.percentile(out_img, 1)
# out_img[out_img > top_ninety] = top_ninety
# out_img[out_img < bottom_ten] = bottom_ten
out_img -= np.min(out_img)
out_img /= np.max(out_img)
h_rem = out_img.shape[0] % 2 ** 5
w_rem = out_img.shape[1] % 2 ** 5
if h_rem != 0 or w_rem != 0:
out_img = np.pad(out_img, ((int(np.ceil(h_rem / 2.)), int(np.floor(h_rem / 2.))),
(int(np.ceil(w_rem / 2.)), int(np.floor(w_rem / 2.))),
(0, 0)), 'symmetric')
corrected_imgs.append(out_img)
# ax[ind, 3].imshow(out_img[:, :, largest], cmap='gray')
# if not no_masks:
# for contour in pancreas_contours:
# ax[ind, 3].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
# ax[ind, 3].set_title('{} Nyul&Udupa HistNorm'.format(mod_name))
# ax[ind, 3].axis('off')
# Performing MIP for plotting only
if not no_masks:
if out_img.shape[-1] >= 5:
try:
if mod_name == 'T1': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.min(out_img[:, :, largest-2:largest+2], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Min Intensity Projection - 5 slices'.format(mod_name))
elif mod_name == 'T2': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.max(out_img[:, :, largest-2:largest+2], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Max Intensity Projection - 5 slices'.format(mod_name))
for contour in pancreas_contours:
ax[ind, 3].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 3].axis('off')
except:
if mod_name == 'T1': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.min(out_img[:, :, 0:5], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Min Intensity Projection - 5 slices'.format(mod_name))
elif mod_name == 'T2': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.max(out_img[:, :, 0:5], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Max Intensity Projection - 5 slices'.format(mod_name))
for contour in pancreas_contours:
ax[ind, 3].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 3].axis('off')
else:
try:
if mod_name == 'T1': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.min(out_img[:, :, largest-1:largest+1], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Min Intensity Projection - 3 slices'.format(mod_name))
elif mod_name == 'T2': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.max(out_img[:, :, largest-1:largest+1], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Max Intensity Projection - 3 slices'.format(mod_name))
for contour in pancreas_contours:
ax[ind, 3].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 3].axis('off')
except:
if mod_name == 'T1': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.min(out_img[:, :, 0:3], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Min Intensity Projection - 3 slices'.format(mod_name))
elif mod_name == 'T2': # TODO: don't hardcode this, change to args.MIP_choices
ax[ind, 3].imshow(np.max(out_img[:, :, 0:3], axis=-1), cmap='gray')
ax[ind, 3].set_title('{} Max Intensity Projection - 3 slices'.format(mod_name))
for contour in pancreas_contours:
ax[ind, 3].plot(contour[:, 1], contour[:, 0], linewidth=2, color='r', alpha=0.5)
ax[ind, 3].axis('off')
fig = plt.gcf()
fig.suptitle(t='{}, IPMN Score: {}'.format(fname, img_names[-1]), y=0.94, fontsize=16)
plt.savefig(os.path.join(fig_path, fname + '.png'), format='png', bbox_inches='tight')
plt.close(fig)
# TODO: Make this handle any number of modalities
if not no_masks:
np.savez_compressed(os.path.join(numpy_path, fname + '.npz'), T1=corrected_imgs[0], T2=corrected_imgs[1],
mask=pancreas_mask)
else:
np.savez_compressed(os.path.join(numpy_path, fname + '.npz'), T1=corrected_imgs[0], T2=corrected_imgs[1])
if not no_masks:
return np.stack((corrected_imgs[0], corrected_imgs[1]), axis=-1), pancreas_mask
else:
return np.stack((corrected_imgs[0], corrected_imgs[1]), axis=-1)
except Exception as e:
print('\n'+'-'*100)
print('Unable to load img or masks for {}'.format(fname))
print(e)
print('Skipping file')
print('-'*100+'\n')
return np.zeros(1), np.zeros(1)
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def augmentImages(batch_of_images, batch_of_masks=None):
# Data augmentation for deep learning, pretty standard plus two custom ones.
for i in range(len(batch_of_images)):
if batch_of_images.ndim == 5:
_, h, w, c, m = batch_of_images.shape
imgs_reshaped = np.reshape(batch_of_images[i, :, :, :, :], (h, w, c * m))
if batch_of_masks is not None:
mask_reshaped = np.reshape(batch_of_masks[i, :, :, :, :], (h, w, c * 1))
else:
imgs_reshaped = batch_of_images[i, :, :, :]
if batch_of_masks is not None:
mask_reshaped = batch_of_masks[i, :, :, :]
if batch_of_masks is not None:
img_and_mask = np.concatenate((imgs_reshaped, mask_reshaped), axis=2)
else:
img_and_mask = imgs_reshaped
if np.random.randint(0,2):
img_and_mask = random_rotation(img_and_mask, rg=30, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='constant', cval=0.)
if np.random.randint(0, 2):
img_and_mask = elastic_transform(img_and_mask, alpha=500, sigma=30, alpha_affine=1)
if np.random.randint(0, 2):
img_and_mask = random_shift(img_and_mask, wrg=0.1, hrg=0.1, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='constant', cval=0.)
if np.random.randint(0, 2):
img_and_mask = random_shear(img_and_mask, intensity=8, row_axis=0, col_axis=1, channel_axis=2,
fill_mode='constant', cval=0.)
if np.random.randint(0, 2):
img_and_mask = random_zoom(img_and_mask, zoom_range=(0.9, 0.9), row_axis=0, col_axis=1, channel_axis=2,
fill_mode='constant', cval=0.)
if np.random.randint(0, 2):
img_and_mask = flip_axis(img_and_mask, axis=1)
if np.random.randint(0, 2):
img_and_mask = flip_axis(img_and_mask, axis=0)
if np.random.randint(0, 2):
salt_pepper_noise(img_and_mask, salt=0.2, amount=0.04)
if batch_of_masks is not None:
aug_imgs = img_and_mask[:, :, :-c]
if batch_of_images.ndim == 5:
batch_of_masks[i, :, :, :] = np.reshape(img_and_mask[:, :, -c:], (h, w, c, 1))
else:
batch_of_masks[i, :, :, :] = img_and_mask[:, :, -c:]
# Ensure the masks did not get any non-binary values.
batch_of_masks[batch_of_masks > 0.5] = 1
batch_of_masks[batch_of_masks <= 0.5] = 0
else:
aug_imgs = img_and_mask
if batch_of_images.ndim == 5:
batch_of_images[i, :, :, :, :] = np.reshape(aug_imgs, (h, w, c, m))
else:
batch_of_images[i, :, :, :] = aug_imgs
return(batch_of_images, batch_of_masks)
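# Hedged usage sketch for augmentImages (shapes are illustrative): a 5-D batch
# of multi-modal slices (batch, H, W, slices, modalities) and matching binary
# masks (batch, H, W, slices, 1) are augmented in place.
#
#   imgs = np.random.rand(4, 256, 256, 8, 2).astype(np.float32)
#   masks = (np.random.rand(4, 256, 256, 8, 1) > 0.5).astype(np.float32)
#   aug_imgs, aug_masks = augmentImages(imgs, masks)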
''' Make the generators threadsafe in case of multiple threads '''
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return self.it.__next__()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
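# Hedged usage sketch: wrapping a plain generator so several worker threads
# (e.g. Keras fit_generator workers) can pull items without interleaving
# `next` calls.
#
#   @threadsafe_generator
#   def count_up(n):
#       for i in range(n):
#           yield i
#
#   gen = count_up(10)  # the returned iterator can now be shared across threads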
def get_one_hot(targets, nb_classes):
res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
return res.reshape(list(targets.shape)+[nb_classes])
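# Hedged example of get_one_hot (values are illustrative): a (2, 2) array of
# class labels becomes a (2, 2, 3) one-hot array.
#
#   labels = np.array([[0, 2], [1, 1]])
#   get_one_hot(labels, 3)[0, 1]  # -> array([0., 0., 1.])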
def find_mask_endpoints(mask):
first = -1
last = -1
for i in range(mask.shape[-1]):
if np.any(mask[:,:,i]) and first == -1:
first = i
if np.any(mask[:,:,mask.shape[-1]-i-1]) and last == -1:
last = mask.shape[-1] - i - 1
if first != -1 and last != -1:
break
largest = np.argmax(np.count_nonzero(mask, axis=(0,1)))
return first, last, largest
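# Hedged example of find_mask_endpoints (toy mask): the non-empty slices are
# 2..4 and slice 3 has the largest cross-section, so the function returns
# (first, last, largest) == (2, 4, 3).
#
#   mask = np.zeros((8, 8, 6), dtype=np.uint8)
#   mask[2:4, 2:4, 2] = 1
#   mask[1:5, 1:5, 3] = 1
#   mask[3:4, 3:4, 4] = 1
#   find_mask_endpoints(mask)  # -> (2, 4, 3)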
@threadsafe_generator
def generate_train_batches(root_path, train_list, net_shape, mod_dirs, exp_name, net, MIP_choices,
n_class=1, batchSize=1, numSlices=1, subSampAmt=-1, stride=1, downSampAmt=1,
shuff=1, aug_data=1):
if n_class == 2:
n_class = 1 # Two classes is binary (0, 1).
# Create placeholders for training
if net.find('3d') != -1 or net.find('inflated') != -1:
img_batch = np.zeros(((batchSize, net_shape[0], net_shape[1], numSlices,
3*len(list(mod_dirs.split(','))[:-1]))), dtype=np.float32)
else:
img_batch = np.zeros(((batchSize, net_shape[0], net_shape[1], 3*len(list(mod_dirs.split(','))[:-1]))),
dtype=np.float32)
gt_batch = np.zeros(((batchSize, n_class)), dtype=np.uint8)
try:
MIP_choices = MIP_choices.replace(' ', '')
mip_list = MIP_choices.split(',')
except:
raise Exception('Unable to convert MIP_choices to a list of integers. Please check this argument.')
while True:
if shuff:
shuffle(train_list)
count = 0
for scan_names in train_list:
path_to_np = os.path.join(root_path,'np_files',scan_names[0].split(os.sep)[1]+'.npz')
if scan_names[0].split(os.sep)[2].split('_')[0] == 'greece' or scan_names[0].split(os.sep)[2].split('_')[0] == 'irene':
path_to_np = path_to_np[:-4] + '_' + scan_names[0].split(os.sep)[2].split('_')[0] + path_to_np[-4:]
try:
with np.load(path_to_np) as data:
imgs = np.stack((data['T1'], data['T2']), axis=-1) # TODO: Find modalities not hardcode
mask = data['mask']
except:
print('\nPre-made numpy array not found for {}.\n Creating now...'.format(os.path.basename(path_to_np)))
imgs, mask = convert_data_to_numpy(root_path=root_path, img_names=scan_names, mod_dirs=mod_dirs,
exp_name=exp_name, no_masks=False)
if np.array_equal(imgs,np.zeros(1)):
continue
else:
print('\nFinished making npz file.')
gt = int(scan_names[-1]) # GT depends on the experiment, do not save/load these, grab from train list.
if imgs.shape[-2] < numSlices * (subSampAmt+1):
imgs = np.pad(imgs, ((0,0), (0,0), (int(np.floor((numSlices * (subSampAmt+1) - imgs.shape[-2]) / 2)),
int(np.ceil((numSlices * (subSampAmt+1) - imgs.shape[-2]) / 2))),
(0,0)), mode='symmetric')
indicies = [0]
else:
mask_first_nonzero, mask_last_nonzero, mask_largest = find_mask_endpoints(mask)
if mask_first_nonzero == -1 or mask_last_nonzero == -1 or mask_largest == -1:
mask_first_nonzero = 0
mask_last_nonzero = imgs.shape[-2]
mask_largest = (mask_first_nonzero + mask_last_nonzero) // 2
if numSlices == 1:
subSampAmt = 0
elif subSampAmt == -1 and numSlices > 1:
np.random.seed(None)
subSampAmt = int(rand(1) * (mask_last_nonzero - mask_first_nonzero) * 0.25)
while mask_last_nonzero - numSlices * (subSampAmt + 1) + 1 <= mask_first_nonzero:
subSampAmt -= 1
if subSampAmt == 0:
break
indicies = np.arange(mask_first_nonzero, mask_last_nonzero - numSlices
# roombapy/mapping/roomba_mapper.py
import io
import math
import logging
import os
import time
from typing import TYPE_CHECKING, NamedTuple, Tuple
import textwrap
# Import trickery
global HAVE_PIL
HAVE_PIL = False
try:
from PIL import Image, ImageDraw, ImageFont
HAVE_PIL = True
except ImportError:
print("PIL module not found, maps are disabled")
if TYPE_CHECKING:
from ..roomba import Roomba
from ..const import ROOMBA_STATES
from .const import (
DEFAULT_BG_COLOR,
DEFAULT_ICON_SIZE,
DEFAULT_MAP_MAX_ALLOWED_DISTANCE,
DEFAULT_MAP_SKIP_POINTS,
DEFAULT_PATH_COLOR,
DEFAULT_PATH_WIDTH
)
from .math_helpers import clamp, rotate, interpolate
from .image_helpers import transparent, make_blank_image, center_image
from .misc_helpers import get_mapper_asset
from .roomba_icon_set import RoombaIconSet
from .roomba_map_device import RoombaMapDevice
from .roomba_map import RoombaMap
class RoombaPosition(NamedTuple):
x: int
y: int
theta: int
class MapRenderParameters(NamedTuple):
icon_set: str
device: str
bg_color: Tuple[int,int,int,int]
path_color: Tuple[int,int,int,int]
path_width: int
class RoombaMapper:
def __init__(self,
roomba: 'Roomba',
font: ImageFont.ImageFont = None,
assets_path = "{PKG}/assets"
):
self.log = logging.getLogger(__name__)
self.roomba = roomba
self.map_enabled = False
self.assets_path = assets_path
#initialize the font
self.font = font
if self.font is None:
try:
self.font = ImageFont.truetype(get_mapper_asset(assets_path, "monaco.ttf"), 30)
except Exception as e:
self.log.warning(f"Error loading font, loading default font")
self.font = ImageFont.load_default()
#generate the default icons
self._icons: dict[str,RoombaIconSet] = {}
self.add_icon_set("default"),
self.add_icon_set("m", roomba_icon="m6_icon.png")
self.add_icon_set("j", roomba_icon="j7_icon.png")
self.add_icon_set("s", roomba_icon="s9_icon.png")
#mapping variables
self._map: RoombaMap = None
self._device: RoombaMapDevice = None
self._devices: dict[str, RoombaMapDevice] = {}
self._render_params: MapRenderParameters = None
self._rendered_map: Image.Image = None
self._base_rendered_map: Image.Image = None
self._points_to_skip = DEFAULT_MAP_SKIP_POINTS
self._points_skipped = 0
self._max_distance = DEFAULT_MAP_MAX_ALLOWED_DISTANCE
self._history = []
self._history_translated: list[RoombaPosition] = []
#initialize a base map
self._initialize_map()
@property
def roomba_image_pos(self) -> RoombaPosition:
try:
#roomba sometimes doesn't show the right coords when docked,
#override the coordinates just in case
if self.roomba.docked:
return self._map_coord_to_image_coord(self.roomba.zero_coords())
return self._history_translated[-1]
except:
return RoombaPosition(None,None,None)
@property
def origin_image_pos(self) -> RoombaPosition:
return self._map_coord_to_image_coord(self.roomba.zero_coords())
@property
def min_coords(self) -> Tuple[int,int]:
if len(self._history) > 0:
return (
min(list(map(lambda p: p["x"], self._history))),
min(list(map(lambda p: p["y"], self._history)))
)
else:
return (0,0)
@property
def max_coords(self) -> Tuple[int,int]:
if len(self._history) > 0:
return (
max(list(map(lambda p: p["x"], self._history))),
max(list(map(lambda p: p["y"], self._history)))
)
else:
return (0,0)
@property
def map_name(self):
if self._map and self._map.name:
return self._map.name
return None
def add_icon_set(self,
name: str,
icon_path: str = "{PKG}/assets",
home_icon = None,
roomba_icon = None,
error_icon = None,
cancelled_icon = None,
battery_low_icon = None,
charging_icon = None,
bin_full_icon = None,
tank_low_icon = None,
icon_size = DEFAULT_ICON_SIZE,
show_direction = True
):
if not name:
self.log.error("Icon sets must have names")
return
i = RoombaIconSet(size=icon_size, show_direction=show_direction, log=self.log)
if roomba_icon:
i.roomba = self._get_mapper_asset(icon_path, roomba_icon)
if error_icon:
i.error = self._get_mapper_asset(icon_path, error_icon)
if cancelled_icon:
i.cancelled = self._get_mapper_asset(icon_path, cancelled_icon)
if battery_low_icon:
i.battery_low = self._get_mapper_asset(icon_path, battery_low_icon)
if charging_icon:
i.charging = self._get_mapper_asset(icon_path, charging_icon)
if bin_full_icon:
i.bin_full = self._get_mapper_asset(icon_path, bin_full_icon)
if tank_low_icon:
i.tank_low = self._get_mapper_asset(icon_path, tank_low_icon)
if home_icon:
i.home = self._get_mapper_asset(icon_path, home_icon)
self._icons[name] = i
def add_map_device(self, name: str, device: RoombaMapDevice):
if not name:
self.log.error("Devices must have names")
return
self._devices[name] = device
def _get_mapper_asset(self, icon_path: str, icon):
if isinstance(icon, str):
return get_mapper_asset(icon_path, icon)
if isinstance(icon, Image.Image):
return icon
else:
return None
def reset_map(self, map: RoombaMap, device: RoombaMapDevice = None, points_to_skip: int = DEFAULT_MAP_SKIP_POINTS):
self.map_enabled = self.roomba.cap.get("pose", False) and HAVE_PIL
self._history = []
self._history_translated = []
self._map = map
self._device = device
self._points_to_skip = points_to_skip
self._points_skipped = 0
self._initialize_map()
def _initialize_map(self):
self._render_params = self._get_render_parameters()
#make sure we have a map
if not self._map:
self._map = RoombaMap("default",None)
#generate the base on which other layers will be composed
base = self._map_blank_image(color=self._render_params.bg_color)
#add the floorplan if available
if self._map and self._map.floorplan:
base = Image.alpha_composite(base, self._map.floorplan)
#set our internal variables so that we can get the default
self._base_rendered_map = base
self._rendered_map = base
def update_map(self, force_redraw = False):
"""Updates the cleaning map"""
#if mapping not enabled, nothing to update
if not self.map_enabled:
return
if (self.roomba.changed('pose') or self.roomba.changed('phase') or
force_redraw) and self.map_enabled:
#make sure we have phase info before trying to render
if self.roomba.current_state is not None:
self._update_state()
self._render_map()
def get_map(self, width: int = None, height: int = None) -> bytes:
#get the default map
map = self._rendered_map
#if we haven't rendered anything, just return
if map is None:
return None
#if we have a requested size, resize it
if width and height:
map = map.resize((width,height))
#save the internal image
with io.BytesIO() as stream:
map.save(stream, format="PNG")
return stream.getvalue()
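# Hedged usage sketch (not part of the original module): grab the rendered map
# as PNG bytes and write it to disk, optionally resizing. `mapper` is assumed
# to be a RoombaMapper that has already had reset_map()/update_map() called.
#
#   png_bytes = mapper.get_map(width=800, height=800)
#   if png_bytes:
#       with open("roomba_map.png", "wb") as fh:
#           fh.write(png_bytes)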
def _update_state(self):
position: dict[str,int] = None
if self.roomba.changed('pose'):
position = self.roomba.co_ords
self.log.debug(f"MAP [State Update]: co-ords: {self.roomba.co_ords} \
phase: {self.roomba.phase}, \
state: {self.roomba.current_state}")
if self.roomba.current_state == ROOMBA_STATES["charge"]:
position = None
elif self.roomba.current_state == ROOMBA_STATES["evac"]:
position = None
elif self.roomba.current_state == ROOMBA_STATES["completed"]:
self.log.info("MAP [State Update]: Mission Complete")
elif self.roomba.current_state == ROOMBA_STATES["run"]:
if self.roomba.co_ords == self.roomba.zero_coords(theta=0):
#bogus pose received, can't have 0,0,0 when running, usually happens after recovering from an error condition
self.log.warning('MAP [State Update]: received 0,0,0 pose when running - ignoring')
position = None
#if we have a position update, append to the history if it meets our criteria
if position:
#there's a few points at the beginning that are usually erroneous, skip them
if self._points_skipped < self._points_to_skip:
self._points_skipped += 1
return
#if we have history, we need to check a couple things
if len(self._history) > 0:
old = self._history[-1]
old_x = old["x"]
old_y = old["y"]
new_x = position["x"]
new_y = position["y"]
#if we didn't actually move from the last recorded position, ignore it
if (old_x,old_y) == (new_x,new_y):
return
#at times, roomba reports erroneous points, ignore if too large of a gap
#between measurements
if self._map_distance((old_x,old_y),(new_x,new_y)) > self._max_distance:
return
self._history.append(position)
self._history_translated.append(self._map_coord_to_image_coord(position))
def _map_coord_to_image_coord(self, coord: dict) -> RoombaPosition:
x: float = float(coord["x"])
y: float = float(coord["y"])
theta: float = float(coord["theta"])
#perform rotation: occurs about the map origin, so should
#undo any rotation that exists
x, y = rotate(x, y, self._map.angle,
invert_x = self._map.coords_start[0] > self._map.coords_end[0],
invert_y = self._map.coords_start[1] < self._map.coords_end[1]
)
#interpolate the x,y coordinates to scale to the appropriate output
img_x = interpolate(
x,
[self._map.coords_start[0], self._map.coords_end[0]],
[0, self._map.img_width - 1]
)
img_y = interpolate(
y,
[self._map.coords_start[1], self._map.coords_end[1]],
[0, self._map.img_height - 1]
)
#make sure we stay within the bounds
img_x = clamp(img_x, 0, self._map.img_width)
img_y = clamp(img_y, 0, self._map.img_height)
#adjust theta
#from what I can see, it looks like the roomba uses a coordinate system:
#0 = facing away from the dock, increasing angle counter-clockwise
#it looks like past 180, the roomba uses negative angles, but still seems
#to be in the counter-clockwise direction
#PIL denotes angles in the counterclockwise direction
#so, to compute the right angle, we need to:
#1) add map angle
#2) add 180 degrees (roomba image faces up, but should face away at 0)
#3) add theta
#4) mod 360, add 360 if negative
img_theta = (self._map.angle + theta + 180) % 360
if img_theta < 0:
img_theta += 360
#return the tuple
return RoombaPosition(int(img_x), int(img_y), int(img_theta))
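# Worked example of the theta conversion above (numbers are illustrative):
# with a map angle of 30 degrees and a reported roomba theta of -90 degrees,
# img_theta = (30 + (-90) + 180) % 360 = 120, i.e. the icon is rotated 120
# degrees counter-clockwise in image space; a negative intermediate result
# would have 360 added to it.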
def _render_map(self):
"""Renders the map"""
#draw in the vacuum path
base = self._draw_vacuum_path(self._base_rendered_map)
#draw in the map walls (to hide overspray)
if self._map.walls:
base = Image.alpha_composite(base, self._map.walls)
#draw the roomba and any problems
base = self._draw_roomba(base)
#finally, draw the text
#base = self._draw_text(base)
#save the map
self._rendered_map = base
def _get_render_parameters(self) -> MapRenderParameters:
if self._map:
icon_set = self._map.icon_set
bg_color = self._map.bg_color
path_color = self._map.path_color
path_width = self._map.path_width
else:
icon_set = self._get_icon_set()
bg_color = DEFAULT_BG_COLOR
path_color = DEFAULT_PATH_COLOR
path_width = DEFAULT_PATH_WIDTH
if self.roomba.blid:
blid = self.roomba.blid
else:
blid = None
if self._device:
if self._device.blid:
blid = self._device.blid
if self._device.icon_set:
icon_set = self._device.icon_set
if self._device.bg_color:
bg_color = self._device.bg_color
if self._device.path_color:
path_color = self._device.path_color
if self._device.path_width:
path_width = self._device.path_width
return MapRenderParameters(
icon_set,
blid,
bg_color,
path_color,
path_width
)
def _map_blank_image(self, color=transparent) -> Image.Image:
return make_blank_image(self._map.img_width,self._map.img_height,color)
def _draw_vacuum_path(self, base: Image.Image) -> Image.Image:
if len(self._history_translated) > 1:
layer = self._map_blank_image()
renderer = ImageDraw.Draw(layer)
renderer.line(
list(map(lambda p: (p.x,p.y), self._history_translated)),
fill=self._render_params.path_color,
width=self._render_params.path_width,
joint="curve"
)
return Image.alpha_composite(base, layer)
else:
return base
def _get_icon_set(self):
#get the default (should always exist)
icon_set = self._icons["default"]
#attempt to get the series specific set
if self.roomba and self.roomba.sku:
series = self._icons.get(self.roomba.sku[0],None)
if series:
icon_set = series
#override with the map set if needed
if self._render_params and self._render_params.icon_set:
try:
icon_set = self._icons[self._render_params.icon_set]
except:
self.log.warn(f"Could not load icon set '{self._render_params.icon_set}' for map.")
return icon_set
def _draw_roomba(self,
"key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": True,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 0,
"key": "FT_T_0_W_10_20_30_40",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_25_W_10_20_30_40": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": True,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 25,
"key": "FT_T_25_W_10_20_30_40",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_50_W_10_20_30_40": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": True,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 50,
"key": "FT_T_50_W_10_20_30_40",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_75_W_10_20_30_40": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": True,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 75,
"key": "FT_T_75_W_10_20_30_40",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_100_W_10_20_30_40": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": True,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 100,
"key": "FT_T_100_W_10_20_30_40",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_100_W_10_20_30_40_IFEF": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-1 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 456},
],
"isFeatureEnabled": False,
},
{
"id": "3",
"name": "Variation-2",
"weight": 30,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-2 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 789},
],
"isFeatureEnabled": True,
},
{
"id": "4",
"name": "Variation-3",
"weight": 40,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Variation-3 string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 100},
],
"isFeatureEnabled": False,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 100,
"key": "FT_T_100_W_10_20_30_40_IFEF",
"status": "RUNNING",
"type": "FEATURE_TEST",
}
],
"accountId": 123456,
"version": 2,
},
"NEW_SETTINGS_FILE": {
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "d1"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 50,
"key": "FEATURE_ROLLOUT_KEY",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
},
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 50,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "d2"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 321},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 50,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "d1"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": True,
},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 50,
"key": "FEATURE_TEST",
"status": "RUNNING",
"type": "FEATURE_TEST",
},
{
"goals": [{"identifier": "CUSTOM_RECOMMENDATION_AB_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": "1", "name": "Control", "weight": 40},
{"id": "2", "name": "Variation-1", "weight": 60},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 90,
"key": "NEW_RECOMMENDATION_AB_CAMPAIGN",
"status": "RUNNING",
"type": "VISUAL_AB",
},
],
"accountId": 123456,
"version": 2,
},
"T_75_W_10_TIMES_10": {
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 231, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 10},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 10},
{"id": 3, "name": "Variation-2", "changes": {}, "weight": 10},
{"id": 4, "name": "Variation-3", "changes": {}, "weight": 10},
{"id": 5, "name": "Variation-4", "changes": {}, "weight": 10},
{"id": 6, "name": "Variation-5", "changes": {}, "weight": 10},
{"id": 7, "name": "Variation-6", "changes": {}, "weight": 10},
{"id": 8, "name": "Variation-7", "changes": {}, "weight": 10},
{"id": 9, "name": "Variation-8", "changes": {}, "weight": 10},
{"id": 10, "name": "Variation-9", "changes": {}, "weight": 10},
],
"id": 260,
"name": "Campaign-260",
"percentTraffic": 75,
"key": "T_75_W_10_TIMES_10",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 123456,
"version": 2,
},
"T_100_W_50_50_WS": {
"sdkKey": "some_unique_key",
"campaigns": [
{
"percentTraffic": 100,
"goals": [{"identifier": "ddd", "id": 453, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 50},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 50},
],
"id": 174,
"name": "Campaign-174",
"segments": {
"and": [
{"or": [{"custom_variable": {"a": "wildcard(*123*)"}}]},
{"or": [{"custom_variable": {"hello": "regex(world)"}}]},
]
},
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"T_50_W_50_50_WS": {
"sdkKey": "some_unique_key",
"campaigns": [
{
"percentTraffic": 50,
"goals": [{"identifier": "ddd", "id": 453, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 50},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 50},
],
"id": 174,
"name": "Campaign-174",
"segments": {
"and": [
{"or": [{"custom_variable": {"a": "wildcard(*123*)"}}]},
{"or": [{"custom_variable": {"hello": "regex(world)"}}]},
| |
the network depth. The depth is given by the length of the argument
"layers".
'''
def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_planes=1):
'''Initialization
Arguments:
channel: the channel number of the first layer, which will also be
used as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
'''
super().__init__(2, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_planes=out_planes)
class AE3d(_AENd):
'''3D convolutional auto-encoder.
This module is a built-in model for a convolutional auto-encoder.
The network would down-sample and up-sample the input data according
to the network depth. The depth is given by the length of the argument
"layers".
'''
def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_planes=1):
'''Initialization
Arguments:
channel: the channel number of the first layer, which will also be
used as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
'''
super().__init__(3, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_planes=out_planes)
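# Hedged usage sketch (shapes are illustrative, not from the original docs): a
# 3D auto-encoder with a base of 32 channels and three stages of two
# convolutional layers each, applied to single-channel volumes.
#
#   import torch
#   net = AE3d(channel=32, layers=[2, 2, 2], kernel_size=3, in_planes=1, out_planes=1)
#   x = torch.randn(2, 1, 32, 32, 32)  # (batch, channel, D, H, W)
#   y = net(x)  # reconstruction, expected to match the input's spatial size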
class EncoderNet1d(_EncoderNetNd):
'''1D convolutional down-scale (encoder) network.
This module is a built-in model for a convolutional network. The network
could be used for down-scaling or classification.
The network would down-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
'''
def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_length=2):
'''Initialization
Arguments:
channel: the channel number of the first layer, which will also be
used as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_length: the length of the output vector. If not set, the
output will not be flattened.
'''
super().__init__(1, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_length=out_length)
class EncoderNet2d(_EncoderNetNd):
'''2D convolutional down-scale (encoder) network.
This module is a built-in model for a convolutional network. The network
could be used for down-scaling or classification.
The network would down-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
'''
def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_length=2):
'''Initialization
Arguments:
channel: the channel number of the first layer, would also be used
as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_length: the length of the output vector, if not set, the
output would not be flattened.
'''
super().__init__(2, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_length=out_length)
class EncoderNet3d(_EncoderNetNd):
'''3D convolutional down-scale (encoder) network.
This module is a built-in model for a convolutional network. The network
could be used for down-scaling or classification.
The network would down-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
'''
def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_length=2):
'''Initialization
Arguments:
channel: the channel number of the first layer, would also be used
as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_length: the length of the output vector, if not set, the
output would not be flattened.
'''
super().__init__(3, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_length=out_length)
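# --- Usage sketch (not part of the original source) ---
# Hedged example of the encoder classes described above. That the output is a
# flattened vector of length out_length is an assumption drawn from the
# docstring wording ("the length of the output vector").
#
#   import torch
#   enc = EncoderNet2d(channel=32, layers=[2, 2, 3], kernel_size=3, in_planes=3, out_length=10)
#   x = torch.randn(4, 3, 64, 64)   # (batch, in_planes, H, W)
#   v = enc(x)                      # expected (batch, out_length)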
class DecoderNet1d(_DecoderNetNd):
'''1D convolutional up-scale (decoder) network.
This module is a built-in model for a convolutional network. The network
could be used for up-scaling or generating samples.
The network would up-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
Different from the encoder network, this module requires the output
shape, and the input shape is inferred from the given output shape.
'''
def __init__(self, channel, layers, out_size, kernel_size=3, in_length=2, out_planes=1):
'''Initialization
Arguments:
channel: the channel number of the first layer, would also be used
as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
out_size: the shape of the output data (without the sample and
channel dimension). This argument is required, because
the shape of the first layer is inferred from it.
Arguments (optional):
kernel_size: the kernel size of each block.
in_length: the length of the input vector. If not set, the
input is required to be a tensor with a specific shape. Use
the self.in_shape property to get it.
out_planes: the channel number of the output data.
'''
super().__init__(1, channel=channel, layers=layers, out_size=out_size,
kernel_size=kernel_size, in_length=in_length, out_planes=out_planes)
class DecoderNet2d(_DecoderNetNd):
'''2D convolutional up-scale (decoder) network.
This module is a built-in model for a convolutional network. The network
could be used for up-scaling or generating samples.
The network would up-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
Different from the encoder network, this module requires the output
shape, and the input shape is inferred from the given output shape.
'''
def __init__(self, channel, layers, out_size, kernel_size=3, in_length=2, out_planes=1):
'''Initialization
Arguments:
channel: the channel number of the first layer, would also be used
as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
out_size: the shape of the output data (without the sample and
channel dimension). This argument is required, because
the shape of the first layer is inferred from it.
Arguments (optional):
kernel_size: the kernel size of each block.
in_length: the length of the input vector. If not set, the
input is required to be a tensor with a specific shape. Use
the self.in_shape property to get it.
out_planes: the channel number of the output data.
'''
super().__init__(2, channel=channel, layers=layers, out_size=out_size,
kernel_size=kernel_size, in_length=in_length, out_planes=out_planes)
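# --- Usage sketch (not part of the original source) ---
# Hedged example of the decoder classes described above. The meaning of
# out_size and the availability of the in_shape property are taken from the
# docstrings; the exact tensor shapes are assumptions.
#
#   import torch
#   dec = DecoderNet2d(channel=32, layers=[2, 2, 3], out_size=(64, 64),
#                      kernel_size=3, in_length=10, out_planes=1)
#   z = torch.randn(4, 10)          # (batch, in_length)
#   y = dec(z)                      # expected (batch, out_planes, 64, 64)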
class DecoderNet3d(_DecoderNetNd):
'''3D convolutional up-scale (decoder) network.
This module is a built-in model for a convolutional network. The network
could be used for up-scaling or generating samples.
The network would up-sample the input data according to the network
depth. The depth is given by the length of the argument "layers".
Different from the encoder network, this module requires the output
shape, and the input shape is inferred from the given output shape.
'''
def __init__(self, channel, layers, out_size, kernel_size=3, in_length=2, out_planes=1):
'''Initialization
Arguments:
channel: the channel number of the first layer, would also be used
as the base of the following channels.
layers: a list of layer numbers. Each number represents the number
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
out_size: the shape of the output data (without the sample and
channel dimension). This argument is required, because
the shape of the first layer is inferred from it.
Arguments (optional):
kernel_size: the kernel size of each block.
in_length: the length of the input vector. If not set, the
input is required to be a tensor with a specific shape. Use
the self.in_shape property to get it.
<filename>fhir/resources/messageheader.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MessageHeader
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import backboneelement, domainresource, fhirtypes
class MessageHeader(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A resource that describes a message that is exchanged between systems.
The header for a message exchange that is either requesting or responding
to an action. The reference(s) that are the subject of the action as well
as other information related to the action are typically transmitted in a
bundle in which the MessageHeader resource instance is the first resource
in the bundle.
"""
resource_type = Field("MessageHeader", const=True)
author: fhirtypes.ReferenceType = Field(
None,
alias="author",
title="The source of the decision",
description=(
"The logical author of the message - the person or device that decided "
"the described event should happen. When there is more than one "
"candidate, pick the most proximal to the MessageHeader. Can provide "
"other authors in extensions."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner", "PractitionerRole"],
)
definition: fhirtypes.Canonical = Field(
None,
alias="definition",
title="Link to the definition for this message",
description="Permanent link to the MessageDefinition for this message.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["MessageDefinition"],
)
definition__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_definition", title="Extension field for ``definition``."
)
destination: ListType[fhirtypes.MessageHeaderDestinationType] = Field(
None,
alias="destination",
title="Message destination application(s)",
description="The destination application which the message is intended for.",
# if property is element of this resource.
element_property=True,
)
enterer: fhirtypes.ReferenceType = Field(
None,
alias="enterer",
title="The source of the data entry",
description=(
"The person or device that performed the data entry leading to this "
"message. When there is more than one candidate, pick the most proximal"
" to the message. Can provide other enterers in extensions."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner", "PractitionerRole"],
)
eventCoding: fhirtypes.CodingType = Field(
None,
alias="eventCoding",
title="Code for the event this message represents or link to event definition",
description=(
"Code that identifies the event this message represents and connects it"
" with its definition. Events defined as part of the FHIR specification"
' have the system value "http://terminology.hl7.org/CodeSystem/message-'
'events". Alternatively uri to the EventDefinition.'
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e event[x]
one_of_many="event",
one_of_many_required=True,
)
eventUri: fhirtypes.Uri = Field(
None,
alias="eventUri",
title="Code for the event this message represents or link to event definition",
description=(
"Code that identifies the event this message represents and connects it"
" with its definition. Events defined as part of the FHIR specification"
' have the system value "http://terminology.hl7.org/CodeSystem/message-'
'events". Alternatively uri to the EventDefinition.'
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e event[x]
one_of_many="event",
one_of_many_required=True,
)
eventUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_eventUri", title="Extension field for ``eventUri``."
)
focus: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="focus",
title="The actual content of the message",
description=(
"The actual data of the message - a reference to the root/focus class "
"of the event."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
reason: fhirtypes.CodeableConceptType = Field(
None,
alias="reason",
title="Cause of event",
description=(
"Coded indication of the cause for the event - indicates a reason for "
"the occurrence of the event that is a focus of this message."
),
# if property is element of this resource.
element_property=True,
)
response: fhirtypes.MessageHeaderResponseType = Field(
None,
alias="response",
title="If this is a reply to prior message",
description=(
"Information about the message that this message is a response to. "
"Only present if this message is a response."
),
# if property is element of this resource.
element_property=True,
)
responsible: fhirtypes.ReferenceType = Field(
None,
alias="responsible",
title="Final responsibility for event",
description=(
"The person or organization that accepts overall responsibility for the"
" contents of the message. The implication is that the message event "
"happened under the policies of the responsible party."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
)
sender: fhirtypes.ReferenceType = Field(
None,
alias="sender",
title="Real world sender of the message",
description=(
"Identifies the sending system to allow the use of a trust " "relationship."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
)
source: fhirtypes.MessageHeaderSourceType = Field(
...,
alias="source",
title="Message source application",
description="The source application from which this message originated.",
# if property is element of this resource.
element_property=True,
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {"event": ["eventCoding", "eventUri"]}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
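# --- Illustrative sketch (not part of the original source) ---
# A rough example of the event[x] choice rule enforced by validate_one_of_many()
# above: exactly one of eventCoding / eventUri must be supplied. The endpoint
# value and the use of a plain dict for ``source`` are illustrative assumptions;
# exact construction details may differ.
#
#   header = MessageHeader(
#       source={"endpoint": "https://example.org/fhir/endpoint"},
#       eventUri="http://example.org/fhir/message-events/admission",
#   )
#   # Supplying both eventCoding and eventUri, or neither of them, raises a
#   # ValueError from the root validator.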
class MessageHeaderDestination(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Message destination application(s).
The destination application which the message is intended for.
"""
resource_type = Field("MessageHeaderDestination", const=True)
endpoint: fhirtypes.Url = Field(
...,
alias="endpoint",
title="Actual destination address or id",
description="Indicates where the message should be routed to.",
# if property is element of this resource.
element_property=True,
)
endpoint__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_endpoint", title="Extension field for ``endpoint``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name of system",
description="Human-readable name for the target system.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
receiver: fhirtypes.ReferenceType = Field(
None,
alias="receiver",
title='Intended "real-world" recipient for the data',
description=(
"Allows data conveyed by a message to be addressed to a particular "
"person or department when routing to a specific application isn't "
"sufficient."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
)
target: fhirtypes.ReferenceType = Field(
None,
alias="target",
title="Particular delivery destination within the destination",
description=(
"Identifies the target end system in situations where the initial "
"message transmission is to an intermediary system."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Device"],
)
class MessageHeaderResponse(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
If this is a reply to prior message.
Information about the message that this message is a response to. Only
present if this message is a response.
# will generate a packetSent notification with the callbackId. The application is responsible for constructing a valid 6LoWPAN packet. The packet is sent to the mote best-effort, so the application should deal with responses and timeouts, if any.
#
# The sendIP command should be used by applications that require end-to-end IP connectivity. For applications that do not require end-to-end IP connectivity, the sendData command provides a simpler interface without requiring the application to understand 6LoWPAN encapsulation. For a more comprehensive discussion of the distinction, see the SmartMesh IP Network User Guide.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param priority 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: Low
# - 1: Medium
# - 2: High
# \param options 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param encryptedOffset 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param data None-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_sendIP named tuple.
#
def dn_sendIP(self, macAddress, priority, options, encryptedOffset, data) :
res = IpMgrConnectorMuxInternal.send(self, ['sendIP'], {"macAddress" : macAddress, "priority" : priority, "options" : options, "encryptedOffset" : encryptedOffset, "data" : data})
return IpMgrConnectorMux.Tuple_dn_sendIP(**res)
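# --- Illustrative sketch (not part of the original generated API) ---
# Hedged example of calling dn_sendIP(); it assumes `mgr` is an already
# connected IpMgrConnectorMux instance. The MAC address and payload bytes are
# placeholder values, not values from this document.
#
#   rc = mgr.dn_sendIP(
#       macAddress      = [0x00, 0x17, 0x0d, 0x00, 0x00, 0x38, 0x06, 0xd9],
#       priority        = 1,    # Medium
#       options         = 0,
#       encryptedOffset = 0,
#       data            = [0x00, 0x01, 0x02],
#   )
#   print(rc)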
##
# The named tuple returned by the dn_restoreFactoryDefaults() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
#
Tuple_dn_restoreFactoryDefaults = collections.namedtuple("Tuple_dn_restoreFactoryDefaults", ['RC'])
##
# The restoreFactoryDefaults command restores the default configuration and clears the ACL. This change is persistent.
#
# For Manager versions <1.3.0 that required a license, the license used to enable optional features is preserved during a restore.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_restoreFactoryDefaults named tuple.
#
def dn_restoreFactoryDefaults(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['restoreFactoryDefaults'], {})
return IpMgrConnectorMux.Tuple_dn_restoreFactoryDefaults(**res)
##
# The named tuple returned by the dn_getMoteInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - <tt>macAddress</tt>: 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>state</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: lost
# - 1: negotiating
# - 4: operational
# - <tt>numNbrs</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numGoodNbrs</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>requestedBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>totalNeededBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>assignedBw</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>packetsReceived</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>packetsLost</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>avgLatency</tt>: 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteInfo = collections.namedtuple("Tuple_dn_getMoteInfo", ['RC', 'macAddress', 'state', 'numNbrs', 'numGoodNbrs', 'requestedBw', 'totalNeededBw', 'assignedBw', 'packetsReceived', 'packetsLost', 'avgLatency'])
##
# The getMoteInfo command returns dynamic information for the specified mote.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteInfo named tuple.
#
def dn_getMoteInfo(self, macAddress) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteInfo'], {"macAddress" : macAddress})
return IpMgrConnectorMux.Tuple_dn_getMoteInfo(**res)
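# --- Illustrative sketch (not part of the original generated API) ---
# Hedged example of reading the Tuple_dn_getMoteInfo fields documented above;
# it assumes `mgr` is an already connected IpMgrConnectorMux instance and the
# MAC address is a placeholder.
#
#   info = mgr.dn_getMoteInfo([0x00, 0x17, 0x0d, 0x00, 0x00, 0x38, 0x06, 0xd9])
#   if info.RC == 0:    # RC_OK
#       print(info.state, info.numNbrs, info.avgLatency)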
##
# The named tuple returned by the dn_getNetworkConfig() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 1: RC_INVALID_COMMAND
# - 2: RC_INVALID_ARGUMENT
# - 11: RC_END_OF_LIST
# - 12: RC_NO_RESOURCES
# - 13: RC_IN_PROGRESS
# - 14: RC_NACK
# - 15: RC_WRITE_FAIL
# - 16: RC_VALIDATION_ERROR
# - 17: RC_INV_STATE
# - 18: RC_NOT_FOUND
# - <tt>networkId</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>apTxPower</tt>: 1-byte field formatted as a ints.<br/>
# There is no restriction on the value of this field.
# - <tt>frameProfile</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 1: Profile_01
# - <tt>maxMotes</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>baseBandwidth</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>downFrameMultVal</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>numParents</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>ccaMode</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: off
# - 1: energy
# - 2: carrier
# - 3: both
# - <tt>channelList</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>autoStartNetwork</tt>: 1-byte field formatted as a bool.<br/>
# There is no restriction on the value of this field.
# - <tt>locMode</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>bbMode</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: off
# - 1: upstream
# - 2: bidirectional
# - <tt>bbSize</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>isRadioTest</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>bwMult</tt>: 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>oneChannel</tt>: 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
Tuple_dn_getNetworkConfig = collections.namedtuple("Tuple_dn_getNetworkConfig", ['RC', 'networkId', 'apTxPower', 'frameProfile', 'maxMotes', 'baseBandwidth', 'downFrameMultVal', 'numParents', 'ccaMode', 'channelList', 'autoStartNetwork', 'locMode', 'bbMode', 'bbSize', 'isRadioTest', 'bwMult', 'oneChannel'])
##
# The getNetworkConfig command returns general network configuration parameters, including the Network ID, bandwidth parameters and number of motes.
#
#
#
# \returns The response to the command, formatted as a #Tuple_dn_getNetworkConfig named tuple.
#
def dn_getNetworkConfig(self, ) :
res = IpMgrConnectorMuxInternal.send(self, ['getNetworkConfig'], {})
return IpMgrConnectorMux.Tuple_dn_getNetworkConfig(**res)
##
# The named | |
import numpy as np
import sys
import scipy.interpolate as interpolate
import asdf
from .function import *
from .basic_func import Basic
class Func:
'''
The list of (possible) `Func` attributes is given below:
Attributes
----------
'''
def __init__(self, MB, dust_model=0):
'''
Parameters
----------
dust_model : int
0 for Calzetti.
'''
self.ID = MB.ID
self.ZZ = MB.Zall
self.age = MB.age
self.AA = MB.nage
self.tau0 = MB.tau0
self.MB = MB
self.dust_model = dust_model
self.DIR_TMP = MB.DIR_TMP
if MB.f_dust:
self.Temp = MB.Temp
try:
self.filts = MB.filts
self.DIR_FIL = MB.DIR_FILT
except:
pass
# Already Read or not;
self.f_af = False
self.f_af0 = False
def demo(self):
ZZ = self.ZZ
AA = self.AA
return ZZ, AA
#############################
# Load template in obs range.
#############################
def open_spec_fits(self, fall=0, orig=False):
'''
'''
ID0 = self.MB.ID
tau0= self.MB.tau0 #[0.01,0.02,0.03]
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
# ASDF;
if fall == 0:
app = ''
hdu0 = self.MB.af['spec']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP
for pp in range(len(tau0)):
for zz in range(len(ZZ)):
Z = ZZ[zz]
NZ = bfnc.Z2NZ(Z)
if zz == 0 and pp == 0:
nr = hdu0['colnum']
xx = hdu0['wavelength']
lib = np.zeros((len(nr), 2+len(AA)*len(ZZ)*len(tau0)), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
for aa in range(len(AA)):
coln = int(2 + aa)
if orig:
colname = 'fspec_orig_' + str(zz) + '_' + str(aa) + '_' + str(pp)
else:
colname = 'fspec_' + str(zz) + '_' + str(aa) + '_' + str(pp)
colnall = int(2 + pp*len(ZZ)*len(AA) + zz*len(AA) + aa) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
return lib
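    # --- Illustrative note (not part of the original source) ---
    # A hedged sketch of how the returned `lib` array is laid out, mirroring the
    # loop above: column 0 holds the template row number, column 1 the wavelength
    # grid, and the spectrum for (tau index pp, metallicity index zz, age index aa)
    # sits at column 2 + pp*len(ZZ)*len(AA) + zz*len(AA) + aa.
    #
    #   lib  = func.open_spec_fits(fall=0)
    #   wave = lib[:, 1]
    #   col  = 2 + pp * len(func.ZZ) * len(func.AA) + zz * len(func.AA) + aa
    #   flux = lib[:, col]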
def open_spec_dust_fits(self, fall=0):
'''
Loads dust template in obs range.
'''
ID0 = self.MB.ID
tau0= self.MB.tau0 #[0.01,0.02,0.03]
from astropy.io import fits
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
if fall == 0:
app = ''
hdu0 = self.MB.af['spec_dust']
elif fall == 1:
app = 'all_'
hdu0 = self.MB.af['spec_dust_full']
DIR_TMP = self.DIR_TMP
nr = hdu0['colnum']
xx = hdu0['wavelength']
lib = np.zeros((len(nr), 2+len(self.Temp)), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
for aa in range(len(self.Temp)):
coln = int(2 + aa)
colname = 'fspec_' + str(aa)
colnall = int(2 + aa) # 2 takes account of wavelength and AV columns.
lib[:,colnall] = hdu0[colname]
if fall==1 and False:
import matplotlib.pyplot as plt
plt.close()
plt.plot(lib[:,1],lib[:,coln],linestyle='-')
plt.show()
return lib
def open_spec_fits_dir(self, nage, nz, kk, Av00, zgal, A00):
'''
Load template in obs range.
But for weird template.
'''
from astropy.io import fits
tau0= self.tau0 #[0.01,0.02,0.03]
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
self.MB.af = asdf.open(self.DIR_TMP + 'spec_all_' + self.ID + '.asdf')
self.MB.af0 = asdf.open(self.DIR_TMP + 'spec_all.asdf')
app = 'all'
hdu0 = self.MB.af['spec_full']
DIR_TMP = self.DIR_TMP #'./templates/'
pp = 0
zz = nz
# Luminosity
mshdu = self.MB.af0['ML']
Ls = mshdu['Ls_%d'%nz]
xx = hdu0['wavelength'] # at RF;
nr = np.arange(0,len(xx),1) #hdu0.data['colnum']
lib = np.zeros((len(nr), 2+1), dtype='float')
lib[:,0] = nr[:]
lib[:,1] = xx[:]
aa = nage
coln = int(2 + aa)
colname = 'fspec_' + str(zz) + '_' + str(aa) + '_' + str(pp)
yy0 = hdu0[colname]/Ls[aa]
yy = flamtonu(xx, yy0)
lib[:,2] = yy[:]
if self.dust_model == 0: # Calzetti
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
elif self.dust_model == 1: # MW
yyd, xxd, nrd = dust_mw(xx, yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx, yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx, yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
print('No entry. Dust model is set to Calzetti')
yyd, xxd, nrd = dust_calz(xx, yy, Av00, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
return A00 * yyd_sort, xxd_sort
def get_template(self, lib, Amp=1.0, T=1.0, Av=0.0, Z=0.0, zgal=1.0, f_bb=False):
'''
Gets an element template given a set of parameters.
Not necessarily the most efficient way, but easy to use.
Parameters
----------
lib : dict
library dictionary.
Amp : float
Amplitude of the target template. Note that each template has Lbol = 1e10Lsun.
T : float
Age, in Gyr.
Av : float
Dust attenuation, in mag.
Z : float
Metallicity, in log(Z/Zsun).
zgal : float
Redshift.
f_bb : bool
Whether to calculate broadband photometry for the requested spectrum.
Returns
-------
flux : float array. Flux in Fnu.
wavelength : float array. Wave in AA.
lcen, lflux : float arrays. Filter central wavelengths and fluxes,
returned only if f_bb==True.
'''
bfnc = self.MB.bfnc
DIR_TMP = self.MB.DIR_TMP
NZ = bfnc.Z2NZ(Z)
pp0 = np.random.uniform(low=0, high=len(self.tau0), size=(1,))
pp = int(pp0[0])
if pp>=len(self.tau0):
pp += -1
nmodel = np.argmin(np.abs(T-self.age[:]))
if T - self.age[nmodel] != 0:
print('T=%.2f is not found in age library. T=%.2f is used.'%(T,self.age[nmodel]))
coln= int(2 + pp*len(self.ZZ)*len(self.AA) + NZ*len(self.AA) + nmodel)
nr = lib[:, 0]
xx = lib[:, 1] # This is OBSERVED wavelength range at z=zgal
yy = lib[:, coln]
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zgal), yy, Av, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zgal), yy, Av, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
if f_bb:
#fil_cen, fil_flux = filconv(self.filts, xxd_sort, Amp * yyd_sort, self.DIR_FIL)
fil_cen, fil_flux = filconv_fast(self.MB, xxd_sort, Amp * yyd_sort)
return Amp * yyd_sort, xxd_sort, fil_flux, fil_cen
else:
return Amp * yyd_sort, xxd_sort
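    # --- Usage sketch (not part of the original source) ---
    # Hedged example of get_template(); the parameter values are placeholders
    # and `lib` is assumed to come from open_spec_fits() above.
    #
    #   lib = func.open_spec_fits(fall=0)
    #   flux, wave = func.get_template(lib, Amp=1.0, T=1.0, Av=0.3, Z=0.0, zgal=1.0)
    #   flux, wave, bb_flux, bb_cen = func.get_template(lib, Amp=1.0, T=1.0, Av=0.3,
    #                                                   Z=0.0, zgal=1.0, f_bb=True)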
def tmp03(self, A00, Av00, nmodel, Z, zgal, lib):
'''
'''
tau0= self.tau0 #[0.01,0.02,0.03]
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc #Basic(ZZ)
DIR_TMP = self.MB.DIR_TMP #'./templates/'
NZ = bfnc.Z2NZ(Z)
pp0 = np.random.uniform(low=0, high=len(tau0), size=(1,))
pp = int(pp0[0])
if pp>=len(tau0):
pp += -1
coln= int(2 + pp*len(ZZ)*len(AA) + NZ*len(AA) + nmodel)
nr = lib[:,0]
xx = lib[:,1] # This is OBSERVED wavelength range at z=zgal
yy = lib[:,coln]
if self.dust_model == 0:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av00, nr)
elif self.dust_model == 1:
yyd, xxd, nrd = dust_mw(xx/(1.+zgal), yy, Av00, nr)
elif self.dust_model == 2: # LMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.06, Eb=2.8)
elif self.dust_model == 3: # SMC
yyd, xxd, nrd = dust_gen(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.42, Eb=0.0)
elif self.dust_model == 4: # Kriek&Conroy with gamma=-0.2
yyd, xxd, nrd = dust_kc(xx/(1.+zgal), yy, Av00, nr, Rv=4.05, gamma=-0.2)
else:
yyd, xxd, nrd = dust_calz(xx/(1.+zgal), yy, Av00, nr)
xxd *= (1.+zgal)
nrd_yyd = np.zeros((len(nrd),3), dtype='float')
nrd_yyd[:,0] = nrd[:]
nrd_yyd[:,1] = yyd[:]
nrd_yyd[:,2] = xxd[:]
b = nrd_yyd
nrd_yyd_sort = b[np.lexsort(([-1,1]*b[:,[1,0]]).T)]
yyd_sort = nrd_yyd_sort[:,1]
xxd_sort = nrd_yyd_sort[:,2]
return A00 * yyd_sort, xxd_sort
def tmp04(self, par, f_Alog=True, nprec=1, f_val=False, lib_all=False, f_nrd=False):
'''
Makes a model template for a given parameter set,
including dust attenuation.
Parameters
----------
nprec : int
Precision when redshift is refined.
'''
ZZ = self.ZZ
AA = self.AA
bfnc = self.MB.bfnc
Mtot = 0
if f_val:
par = par.params
if self.MB.fzmc == 1:
try:
zmc = par['zmc'].value
except:
zmc = self.MB.zgal
else:
zmc = self.MB.zgal
pp = 0
# AV limit;
if par['Av'] < self.MB.Avmin:
par['Av'] = self.MB.Avmin
if par['Av'] > self.MB.Avmax:
par['Av'] = self.MB.Avmax
Av00 = par['Av']
for aa in range(len(AA)):
if self.MB.ZEVOL==1 or aa == 0:
Z = par['Z'+str(aa)]
NZ = bfnc.Z2NZ(Z)
else:
pass
# Check limit;
if par['A'+str(aa)] < self.MB.Amin:
par['A'+str(aa)] = self.MB.Amin
if par['A'+str(aa)] > self.MB.Amax:
par['A'+str(aa)] = | |
sum = "h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=",
version = "v1.4.0",
)
go_repository(
name = "com_github_gliderlabs_ssh",
importpath = "github.com/gliderlabs/ssh",
sum = "h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=",
version = "v0.2.2",
)
go_repository(
name = "com_github_globalsign_mgo",
importpath = "github.com/globalsign/mgo",
sum = "h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=",
version = "v0.0.0-20181015135952-eeefdecb41b8",
)
go_repository(
name = "com_github_go_check_check",
importpath = "github.com/go-check/check",
sum = "h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=",
version = "v0.0.0-20180628173108-788fd7840127",
)
go_repository(
name = "com_github_go_critic_go_critic",
importpath = "github.com/go-critic/go-critic",
sum = "h1:3RJdgf6u4NZUumoP8nzbqiiNT8e1tC2Oc7jlgqre/IA=",
version = "v0.5.2",
)
go_repository(
name = "com_github_go_errors_errors",
importpath = "github.com/go-errors/errors",
sum = "h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=",
version = "v1.0.1",
)
go_repository(
name = "com_github_go_git_gcfg",
importpath = "github.com/go-git/gcfg",
sum = "h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=",
version = "v1.5.0",
)
go_repository(
name = "com_github_go_git_go_billy_v5",
importpath = "github.com/go-git/go-billy/v5",
sum = "h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=",
version = "v5.0.0",
)
go_repository(
name = "com_github_go_git_go_git_fixtures_v4",
importpath = "github.com/go-git/go-git-fixtures/v4",
sum = "h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=",
version = "v4.0.2-0.20200613231340-f56387b50c12",
)
go_repository(
name = "com_github_go_git_go_git_v5",
importpath = "github.com/go-git/go-git/v5",
sum = "h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI=",
version = "v5.2.0",
)
go_repository(
name = "com_github_go_gl_glfw",
importpath = "github.com/go-gl/glfw",
sum = "h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=",
version = "v0.0.0-20190409004039-e6da0acd62b1",
)
go_repository(
name = "com_github_go_gl_glfw_v3_3_glfw",
importpath = "github.com/go-gl/glfw/v3.3/glfw",
sum = "h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=",
version = "v0.0.0-20200222043503-6f7a984d4dc4",
)
go_repository(
name = "com_github_go_ini_ini",
importpath = "github.com/go-ini/ini",
sum = "h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=",
version = "v1.25.4",
)
go_repository(
name = "com_github_go_kit_kit",
importpath = "github.com/go-kit/kit",
sum = "h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=",
version = "v0.9.0",
)
go_repository(
name = "com_github_go_logfmt_logfmt",
importpath = "github.com/go-logfmt/logfmt",
sum = "h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=",
version = "v0.4.0",
)
go_repository(
name = "com_github_go_logr_logr",
importpath = "github.com/go-logr/logr",
sum = "h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=",
version = "v0.2.0",
)
go_repository(
name = "com_github_go_martini_martini",
importpath = "github.com/go-martini/martini",
sum = "h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=",
version = "v0.0.0-20170121215854-22fa46961aab",
)
go_repository(
name = "com_github_go_ole_go_ole",
importpath = "github.com/go-ole/go-ole",
sum = "h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=",
version = "v1.2.1",
)
go_repository(
name = "com_github_go_openapi_analysis",
importpath = "github.com/go-openapi/analysis",
sum = "h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE=",
version = "v0.19.10",
)
go_repository(
name = "com_github_go_openapi_errors",
importpath = "github.com/go-openapi/errors",
sum = "h1:Sxpo9PjEHDzhs3FbnGNonvDgWcMW2U7wGTcDDSFSceM=",
version = "v0.20.0",
)
go_repository(
name = "com_github_go_openapi_jsonpointer",
importpath = "github.com/go-openapi/jsonpointer",
sum = "h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_jsonreference",
importpath = "github.com/go-openapi/jsonreference",
sum = "h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=",
version = "v0.19.3",
)
go_repository(
name = "com_github_go_openapi_loads",
importpath = "github.com/go-openapi/loads",
sum = "h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls=",
version = "v0.19.5",
)
go_repository(
name = "com_github_go_openapi_runtime",
importpath = "github.com/go-openapi/runtime",
sum = "h1:K/6PoVNj5WJXUnMk+VEbELeXjtBkCS1UxTDa04tdXE0=",
version = "v0.19.26",
)
go_repository(
name = "com_github_go_openapi_spec",
importpath = "github.com/go-openapi/spec",
sum = "h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg=",
version = "v0.19.8",
)
go_repository(
name = "com_github_go_openapi_strfmt",
importpath = "github.com/go-openapi/strfmt",
sum = "h1:FEv6Pt/V4wLwP4vOCZbWlpfmi8kj4UiRip34IDE6SGw=",
version = "v0.19.10",
)
go_repository(
name = "com_github_go_openapi_swag",
importpath = "github.com/go-openapi/swag",
sum = "h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=",
version = "v0.19.14",
)
go_repository(
name = "com_github_go_openapi_validate",
importpath = "github.com/go-openapi/validate",
sum = "h1:mPLM/bfbd00PGOCJlU0yJL7IulkZ+q9VjPv7U11RMQQ=",
version = "v0.19.12",
)
go_repository(
name = "com_github_go_playground_locales",
importpath = "github.com/go-playground/locales",
sum = "h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=",
version = "v0.13.0",
)
go_repository(
name = "com_github_go_playground_universal_translator",
importpath = "github.com/go-playground/universal-translator",
sum = "h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=",
version = "v0.16.0",
)
go_repository(
name = "com_github_go_playground_validator",
importpath = "github.com/go-playground/validator",
sum = "h1:myhdWhx5UHvLXZ7maUcP0uYxyijMT+smaNAhARBVc9s=",
version = "v9.30.0+incompatible",
)
go_repository(
name = "com_github_go_redis_redis",
importpath = "github.com/go-redis/redis",
sum = "h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=",
version = "v6.15.9+incompatible",
)
go_repository(
name = "com_github_go_redis_redis_extra_rediscmd_v8",
importpath = "github.com/go-redis/redis/extra/rediscmd/v8",
sum = "h1:kjH+FAEz3G8gq8zDBgoLSCZj19Z1SK4dg/VtD3QE1IE=",
version = "v8.8.2",
)
go_repository(
name = "com_github_go_redis_redis_extra_redisotel_v8",
importpath = "github.com/go-redis/redis/extra/redisotel/v8",
sum = "h1:0JlrPvtN5SjIe5gTmAsvIAIqn9FEGaVJgGOFg63Djgk=",
version = "v8.10.0",
)
go_repository(
name = "com_github_go_redis_redis_v8",
importpath = "github.com/go-redis/redis/v8",
sum = "h1:OZwrQKuZqdJ4QIM8wn8rnuz868Li91xA3J2DEq+TPGA=",
version = "v8.10.0",
)
go_repository(
name = "com_github_go_redsync_redsync_v4",
importpath = "github.com/go-redsync/redsync/v4",
replace = "github.com/bduffany/redsync/v4",
sum = "h1:wyBDr+ApDWybbLGMSsepl30KzPAl+HlIQMhJSoLdRKg=",
version = "v4.4.1-minimal",
)
go_repository(
name = "com_github_go_sql_driver_mysql",
importpath = "github.com/go-sql-driver/mysql",
sum = "h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=",
version = "v1.6.0",
)
go_repository(
name = "com_github_go_stack_stack",
importpath = "github.com/go-stack/stack",
sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=",
version = "v1.8.0",
)
go_repository(
name = "com_github_go_toolsmith_astcast",
importpath = "github.com/go-toolsmith/astcast",
sum = "h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astcopy",
importpath = "github.com/go-toolsmith/astcopy",
sum = "h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astequal",
importpath = "github.com/go-toolsmith/astequal",
sum = "h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astfmt",
importpath = "github.com/go-toolsmith/astfmt",
sum = "h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_astinfo",
importpath = "github.com/go-toolsmith/astinfo",
sum = "h1:wP6mXeB2V/d1P1K7bZ5vDUO3YqEzcvOREOxZPEu3gVI=",
version = "v0.0.0-20180906194353-9809ff7efb21",
)
go_repository(
name = "com_github_go_toolsmith_astp",
importpath = "github.com/go-toolsmith/astp",
sum = "h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_pkgload",
importpath = "github.com/go-toolsmith/pkgload",
sum = "h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_strparse",
importpath = "github.com/go-toolsmith/strparse",
sum = "h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_go_toolsmith_typep",
importpath = "github.com/go-toolsmith/typep",
sum = "h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=",
version = "v1.0.2",
)
go_repository(
name = "com_github_go_xmlfmt_xmlfmt",
importpath = "github.com/go-xmlfmt/xmlfmt",
sum = "h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=",
version = "v0.0.0-20191208150333-d5b6f63a941b",
)
go_repository(
name = "com_github_gobuffalo_attrs",
importpath = "github.com/gobuffalo/attrs",
sum = "h1:hSkbZ9XSyjyBirMeqSqUrK+9HboWrweVlzRNqoBi2d4=",
version = "v0.0.0-20190224210810-a9411de4debd",
)
go_repository(
name = "com_github_gobuffalo_depgen",
importpath = "github.com/gobuffalo/depgen",
sum = "h1:31atYa/UW9V5q8vMJ+W6wd64OaaTHUrCUXER358zLM4=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gobuffalo_envy",
importpath = "github.com/gobuffalo/envy",
sum = "h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU=",
version = "v1.7.0",
)
go_repository(
name = "com_github_gobuffalo_flect",
importpath = "github.com/gobuffalo/flect",
sum = "h1:3GQ53z7E3o00C/yy7Ko8VXqQXoJGLkrTQCLTF1EjoXU=",
version = "v0.1.3",
)
go_repository(
name = "com_github_gobuffalo_genny",
importpath = "github.com/gobuffalo/genny",
sum = "h1:iQ0D6SpNXIxu52WESsD+KoQ7af2e3nCfnSBoSF/hKe0=",
version = "v0.1.1",
)
go_repository(
name = "com_github_gobuffalo_gitgen",
importpath = "github.com/gobuffalo/gitgen",
sum = "h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8=",
version = "v0.0.0-20190315122116-cc086187d211",
)
go_repository(
name = "com_github_gobuffalo_gogen",
importpath = "github.com/gobuffalo/gogen",
sum = "h1:dLg+zb+uOyd/mKeQUYIbwbNmfRsr9hd/WtYWepmayhI=",
version = "v0.1.1",
)
go_repository(
name = "com_github_gobuffalo_logger",
importpath = "github.com/gobuffalo/logger",
sum = "h1:8thhT+kUJMTMy3HlX4+y9Da+BNJck+p109tqqKp7WDs=",
version = "v0.0.0-20190315122211-86e12af44bc2",
)
go_repository(
name = "com_github_gobuffalo_mapi",
importpath = "github.com/gobuffalo/mapi",
sum = "h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk=",
version = "v1.0.2",
)
go_repository(
name = "com_github_gobuffalo_packd",
importpath = "github.com/gobuffalo/packd",
sum = "h1:4sGKOD8yaYJ+dek1FDkwcxCHA40M4kfKgFHx8N2kwbU=",
version = "v0.1.0",
)
go_repository(
name = "com_github_gobuffalo_packr_v2",
importpath = "github.com/gobuffalo/packr/v2",
sum = "h1:Ir9W9XIm9j7bhhkKE9cokvtTl1vBm62A/fene/ZCj6A=",
version = "v2.2.0",
)
go_repository(
name = "com_github_gobuffalo_syncx",
importpath = "github.com/gobuffalo/syncx",
sum = "h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=",
version = "v0.0.0-20190224160051-33c29581e754",
)
go_repository(
name = "com_github_gobwas_glob",
importpath = "github.com/gobwas/glob",
sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=",
version = "v0.2.3",
)
go_repository(
name = "com_github_gobwas_httphead",
importpath = "github.com/gobwas/httphead",
sum = "h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=",
version = "v0.0.0-20180130184737-2c6c146eadee",
)
go_repository(
name = "com_github_gobwas_pool",
importpath = "github.com/gobwas/pool",
sum = "h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=",
version = "v0.2.0",
)
go_repository(
name = "com_github_gobwas_ws",
importpath = "github.com/gobwas/ws",
sum = "h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=",
version = "v1.0.2",
)
go_repository(
name = "com_github_goccy_go_json",
importpath = "github.com/goccy/go-json",
sum = "h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec=",
version = "v0.7.10",
)
go_repository(
name = "com_github_godbus_dbus",
importpath = "github.com/godbus/dbus",
sum = "h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=",
version = "v0.0.0-20190422162347-ade71ed3457e",
)
go_repository(
name = "com_github_godbus_dbus_v5",
importpath = "github.com/godbus/dbus/v5",
sum = "h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=",
version = "v5.0.3",
)
go_repository(
name = "com_github_gofrs_flock",
importpath = "github.com/gofrs/flock",
sum = "h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=",
version = "v0.8.0",
)
go_repository(
name = "com_github_gofrs_uuid",
importpath = "github.com/gofrs/uuid",
sum = "h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=",
version = "v3.3.0+incompatible",
)
go_repository(
name = "com_github_gogo_googleapis",
build_file_proto_mode = "disable",
importpath = "github.com/gogo/googleapis",
sum = "h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=",
version = "v1.4.1",
)
go_repository(
name = "com_github_gogo_protobuf",
importpath = "github.com/gogo/protobuf",
sum = "h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=",
version = "v1.3.2",
)
go_repository(
name = "com_github_gogo_status",
importpath = "github.com/gogo/status",
sum = "h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=",
version = "v1.1.0",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_groupcache",
importpath = "github.com/golang/groupcache",
sum = "h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=",
version = "v0.0.0-20200121045136-8c9f03a8e57e",
)
go_repository(
name = "com_github_golang_jwt_jwt",
importpath = "github.com/golang-jwt/jwt",
sum = "h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=",
version = "v3.2.2+incompatible",
)
go_repository(
name = "com_github_golang_jwt_jwt_v4",
importpath = "github.com/golang-jwt/jwt/v4",
sum = "h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=",
version = "v4.1.0",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=",
version = "v1.6.0",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=",
version = "v1.5.2",
)
go_repository(
name = "com_github_golang_snappy",
importpath = "github.com/golang/snappy",
sum = "h1:vsBwDpPr442gOtutTvPdgJ0xhaUI/1uX53JosuHzIRE=",
version = "v0.0.4-0.20210502035320-33fc3d5d8d99",
)
go_repository(
name = "com_github_golangci_check",
importpath = "github.com/golangci/check",
sum = "h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=",
version = "v0.0.0-20180506172741-cfe4005ccda2",
)
go_repository(
name = "com_github_golangci_dupl",
importpath = "github.com/golangci/dupl",
sum = "h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=",
version = "v0.0.0-20180902072040-3e9179ac440a",
)
go_repository(
name = "com_github_golangci_errcheck",
importpath = "github.com/golangci/errcheck",
sum = "h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w=",
version = "v0.0.0-20181223084120-ef45e06d44b6",
)
go_repository(
name = "com_github_golangci_go_misc",
importpath = "github.com/golangci/go-misc",
sum = "h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=",
version = "v0.0.0-20180628070357-927a3d87b613",
)
go_repository(
name = "com_github_golangci_goconst",
importpath = "github.com/golangci/goconst",
sum = "h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8=",
version = "v0.0.0-20180610141641-041c5f2b40f3",
)
go_repository(
name = "com_github_golangci_gocyclo",
importpath = "github.com/golangci/gocyclo",
sum = "h1:pXTK/gkVNs7Zyy7WKgLXmpQ5bHTrq5GDsp8R9Qs67g0=",
version = "v0.0.0-20180528144436-0a533e8fa43d",
)
go_repository(
name = "com_github_golangci_gofmt",
importpath = "github.com/golangci/gofmt",
sum = "h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=",
version = "v0.0.0-20190930125516-244bba706f1a",
)
go_repository(
name = "com_github_golangci_golangci_lint",
importpath = "github.com/golangci/golangci-lint",
sum = "h1:+m9I3LEmxXLpymkXRPkDQGzOVBmBYm16UtDiXqZxWek=",
version = "v1.31.0",
)
go_repository(
name = "com_github_golangci_ineffassign",
importpath = "github.com/golangci/ineffassign",
sum = "h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI=",
version = "v0.0.0-20190609212857-42439a7714cc",
)
go_repository(
name = "com_github_golangci_lint_1",
importpath = "github.com/golangci/lint-1",
sum = "h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=",
version = "v0.0.0-20191013205115-297bf364a8e0",
)
go_repository(
name = "com_github_golangci_maligned",
importpath = "github.com/golangci/maligned",
sum = "h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=",
version = "v0.0.0-20180506175553-b1d89398deca",
)
go_repository(
name = "com_github_golangci_misspell",
importpath = "github.com/golangci/misspell",
sum = "h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk=",
version = "v0.0.0-20180809174111-950f5d19e770",
)
go_repository(
name = "com_github_golangci_prealloc",
importpath = "github.com/golangci/prealloc",
| |
name was incorrect. Theme was not deleted.'
)
assert Addon.objects.filter(id=theme.id).exists()
self.assert3xx(response, theme.get_dev_url('versions'))
class TestHome(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestHome, self).setUp()
assert self.client.login(email='<EMAIL>')
self.url = reverse('devhub.index')
self.addon = Addon.objects.get(pk=3615)
def get_pq(self):
response = self.client.get(self.url)
assert response.status_code == 200
return pq(response.content)
def test_basic_logged_out(self):
self.client.logout()
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'Customize Firefox' in response.content
def test_default_lang_selected(self):
self.client.logout()
doc = self.get_pq()
selected_value = doc('#language option:selected').attr('value')
assert selected_value == 'en-us'
def test_basic_logged_in(self):
response = self.client.get(self.url)
assert response.status_code == 200
self.assertTemplateUsed(response, 'devhub/index.html')
assert b'My Add-ons' in response.content
def test_my_addons_addon_versions_link(self):
assert self.client.login(email='<EMAIL>')
doc = self.get_pq()
addon_list = doc('.DevHub-MyAddons-list')
href = addon_list.find('.DevHub-MyAddons-item-versions a').attr('href')
assert href == self.addon.get_dev_url('versions')
def test_my_addons(self):
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'Awaiting Review'),
(amo.STATUS_APPROVED, amo.STATUS_AWAITING_REVIEW, 'Approved'),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED, 'Disabled by Mozilla'),
]
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html()
)
assert (
status_str == addon_item.find('.DevHub-MyAddons-VersionStatus').text()
)
Addon.objects.all().delete()
assert self.get_pq()('.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_recommended(self):
self.make_addon_promoted(self.addon, RECOMMENDED, approve_version=True)
latest_version = self.addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED)
latest_file = latest_version.files.all()[0]
statuses = [
(amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW, 'Awaiting Review'),
(
amo.STATUS_APPROVED,
amo.STATUS_AWAITING_REVIEW,
'Approved and Recommended',
),
(amo.STATUS_DISABLED, amo.STATUS_APPROVED, 'Disabled by Mozilla'),
]
for addon_status, file_status, status_str in statuses:
latest_file.update(status=file_status)
self.addon.update(status=addon_status)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
if self.addon.type != amo.ADDON_STATICTHEME:
assert self.addon.get_icon_url(64) in addon_item.html()
else:
assert self.addon.current_previews[0].thumbnail_url in (
addon_item.html()
)
assert (
status_str == addon_item.find('.DevHub-MyAddons-VersionStatus').text()
)
Addon.objects.all().delete()
assert self.get_pq()('.DevHub-MyAddons-list .DevHub-MyAddons-item').length == 0
def test_my_addons_with_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
VersionPreview.objects.create(version=self.addon.current_version)
self.test_my_addons()
def test_my_addons_incomplete(self):
self.addon.update(status=amo.STATUS_NULL)
# Make add-on incomplete
AddonCategory.objects.filter(addon=self.addon).delete()
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-item-edit').attr(
'href'
) == self.addon.get_dev_url('edit')
def test_my_addons_no_disabled_or_deleted(self):
self.addon.update(status=amo.STATUS_APPROVED, disabled_by_user=True)
doc = self.get_pq()
addon_item = doc('.DevHub-MyAddons-list .DevHub-MyAddons-item')
assert addon_item.length == 1
assert addon_item.find('.DevHub-MyAddons-VersionStatus').text() == 'Invisible'
class TestActivityFeed(TestCase):
fixtures = ('base/users', 'base/addon_3615')
def setUp(self):
super(TestActivityFeed, self).setUp()
assert self.client.login(email='<EMAIL>')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.versions.first()
self.action_user = UserProfile.objects.get(email='<EMAIL>')
ActivityLog.objects.all().delete()
def test_feed_for_all(self):
response = self.client.get(reverse('devhub.feed_all'))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == 'Recent Activity for My Add-ons'
def test_feed_for_addon(self):
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
doc = pq(response.content)
assert doc('header h2').text() == ('Recent Activity for %s' % self.addon.name)
def test_feed_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 200
def test_feed_disabled_anon(self):
self.client.logout()
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
assert response.status_code == 302
def add_log(self, action=amo.LOG.ADD_RATING):
core.set_user(self.action_user)
ActivityLog.create(action, self.addon, self.version)
def add_hidden_log(self, action=amo.LOG.COMMENT_VERSION):
self.add_log(action=action)
def test_feed_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity li.item')) == 0
def test_addons_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 0
def test_unlisted_addons_dashboard(self):
"""Unlisted addons are displayed in the feed on the dashboard page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
assert len(doc('.recent-activity li.item')) == 2
def test_unlisted_addons_feed_sidebar(self):
"""Unlisted addons are displayed in the left side in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
# First li is "All My Add-ons".
assert len(doc('#refine-addon li')) == 2
def test_unlisted_addons_feed(self):
"""Unlisted addons are displayed in the feed page."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 2
def test_unlisted_addons_feed_filter(self):
"""Feed page can be filtered on unlisted addon."""
self.make_addon_unlisted(self.addon)
self.add_log()
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
assert len(doc('#recent-activity .item')) == 2
def test_reviewer_name_is_used_for_reviewer_actions(self):
self.action_user.update(display_name='HîdeMe', reviewer_name='ShöwMe')
self.add_log(action=amo.LOG.APPROVE_VERSION)
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_str(response.content)
assert self.action_user.reviewer_name in content
assert self.action_user.name not in content
def test_regular_name_is_used_for_non_reviewer_actions(self):
# Fields are inverted compared to the test above.
self.action_user.update(reviewer_name='HîdeMe', display_name='ShöwMe')
self.add_log(action=amo.LOG.ADD_RATING) # not a reviewer action.
response = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(response.content)
assert len(doc('#recent-activity .item')) == 1
content = force_str(response.content)
# Assertions are inverted compared to the test above.
assert self.action_user.reviewer_name not in content
assert self.action_user.name in content
def test_addons_dashboard_name(self):
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name in timestamp.html()
assert '<a href=' not in timestamp.html()
def test_addons_dashboard_reviewer_name(self):
self.action_user.update(reviewer_name='bob')
self.add_log(action=amo.LOG.APPROVE_VERSION)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
timestamp = doc('.recent-activity li.item span.activity-timestamp')
assert len(timestamp) == 1
assert self.action_user.name
assert self.action_user.name not in timestamp.html()
assert self.action_user.reviewer_name in timestamp.html()
assert '<a href=' not in timestamp.html()
class TestAPIAgreement(TestCase):
fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']
def setUp(self):
super(TestAPIAgreement, self).setUp()
assert self.client.login(email='<EMAIL>')
self.user = UserProfile.objects.get(email='<EMAIL>')
self.user.update(last_login_ip='192.168.1.1')
def test_agreement_read(self):
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
self.assert3xx(response, reverse('devhub.api_key'))
def test_agreement_unread_captcha_inactive(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' not in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha') == []
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_unread_captcha_active(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
form = response.context['agreement_form']
assert 'recaptcha' in form.fields
doc = pq(response.content)
assert doc('.g-recaptcha')
def test_agreement_submit_success(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_error(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'))
# Captcha is properly rendered
doc = pq(response.content)
assert doc('.g-recaptcha')
assert 'recaptcha' in response.context['agreement_form'].errors
@override_switch('developer-agreement-captcha', active=True)
def test_agreement_submit_captcha_active_success(self):
self.user.update(read_dev_agreement=None)
verify_data = urlencode(
{
'secret': '',
'remoteip': '127.0.0.1',
'response': 'test',
}
)
responses.add(
responses.GET,
'https://www.google.com/recaptcha/api/siteverify?' + verify_data,
json={'error-codes': [], 'success': True},
)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'g-recaptcha-response': 'test',
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 302
assert response['Location'] == reverse('devhub.api_key')
self.user.reload()
self.assertCloseToNow(self.user.read_dev_agreement)
def test_agreement_read_but_too_long_ago(self):
set_config('last_dev_agreement_change_date', '2018-01-01 12:00')
before_agreement_last_changed = datetime(2018, 1, 1, 12, 0) - timedelta(days=1)
self.user.update(read_dev_agreement=before_agreement_last_changed)
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
@mock.patch('olympia.addons.utils.RestrictionChecker.is_submission_allowed')
def test_cant_submit_agreement_if_restricted(self, is_submission_allowed_mock):
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
self.user.reload()
assert self.user.read_dev_agreement is None
assert is_submission_allowed_mock.call_count == 2
# First call is from the form, and it's not checking the agreement,
# it's just to see if the user is restricted.
assert is_submission_allowed_mock.call_args_list[0] == (
(),
{'check_dev_agreement': False},
)
# Second call is from the view itself, no arguments
assert is_submission_allowed_mock.call_args_list[1] == ((), {})
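# Illustrative sketch only (not used by the suite): how unittest.mock records
# calls so that the (args, kwargs) comparisons in the assertions above work.
# Uses the `mock` module already imported by this test file.
def _call_args_list_sketch():
    checker = mock.Mock()
    checker.is_submission_allowed(check_dev_agreement=False)
    checker.is_submission_allowed()
    # Each recorded call compares equal to an (args, kwargs) tuple.
    assert checker.is_submission_allowed.call_args_list == [
        ((), {'check_dev_agreement': False}),
        ((), {}),
    ]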
def test_cant_submit_agreement_if_restricted_functional(self):
# Like test_cant_submit_agreement_if_restricted() but with no mocks,
# picking a single restriction and making sure it's working properly.
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
self.user.update(read_dev_agreement=None)
response = self.client.post(
reverse('devhub.api_key_agreement'),
data={
'distribution_agreement': 'on',
'review_policy': 'on',
},
)
assert response.status_code == 200
assert response.context['agreement_form'].is_valid() is False
doc = pq(response.content)
assert doc('.addon-submission-process').text() == (
'Multiple add-ons violating our policies have been submitted '
'from your location. The IP address has been blocked.\n'
'More information on Developer Accounts'
)
@mock.patch('olympia.addons.utils.RestrictionChecker.is_submission_allowed')
def test_agreement_page_shown_if_restricted(self, is_submission_allowed_mock):
# Like test_agreement_read() above, but with a restricted user: they
# are shown the agreement page again instead of redirecting to the
# api keys page.
is_submission_allowed_mock.return_value = False
self.user.update(read_dev_agreement=self.days_ago(0))
response = self.client.get(reverse('devhub.api_key_agreement'))
assert response.status_code == 200
assert 'agreement_form' in response.context
class TestAPIKeyPage(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAPIKeyPage, self).setUp()
self.url = reverse('devhub.api_key')
assert self.client.login(email='<EMAIL>')
self.user = UserProfile.objects.get(email='<EMAIL>')
self.user.update(last_login_ip='192.168.1.1')
def test_key_redirect(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_redirect_if_restricted(self):
IPNetworkUserRestriction.objects.create(network='127.0.0.1/32')
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_view_without_credentials_not_confirmed_yet(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Generate new credentials'
inputs = doc('.api-input input')
assert len(inputs) == 0, 'Inputs should be absent before keys exist'
assert not doc('input[name=confirmation_token]')
def test_view_with_credentials(self):
APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret',
)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Revoke and regenerate credentials'
assert doc('#revoke-key').text() == 'Revoke'
key_input = doc('.key-input input').val()
assert key_input == 'some-jwt-key'
def test_view_without_credentials_confirmation_requested_no_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='<PASSWORD>', confirmed_once=False
)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Since confirmation has already been requested, there shouldn't be
# any buttons on the page if no token was passed in the URL - the user
# needs to follow the link in the email to continue.
assert not doc('input[name=confirmation_token]')
assert not doc('input[name=action]')
def test_view_without_credentials_confirmation_requested_with_token(self):
APIKeyConfirmation.objects.create(
user=self.user, token='<PASSWORD>', confirmed_once=False
)
self.url
= None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SCENE_PT_keyframing_settings(SceneKeyingSetsPanel, SceneButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_keyframing_settings(self, context, layout, ks, ksp):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SCENE_PT_keying_set_paths(SceneKeyingSetsPanel, SceneButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_keyframing_settings(self, context, layout, ks, ksp):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SCENE_PT_keying_sets(SceneKeyingSetsPanel, SceneButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_keyframing_settings(self, context, layout, ks, ksp):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
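# Illustrative sketch, not part of the generated stubs above: roughly how a
# scene-properties panel like these is defined by hand, and how the
# append()/prepend()/remove() hooks listed in the stubs are used by add-ons
# to extend an existing panel. The class and function names are assumptions.
import bpy


class EXAMPLE_PT_scene_notes(bpy.types.Panel):
    bl_label = "Scene Notes"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"

    @classmethod
    def poll(cls, context):
        # Only show the panel when a scene is available.
        return context.scene is not None

    def draw(self, context):
        self.layout.label(text=context.scene.name)


def _extra_keying_sets_draw(self, context):
    # Extra row appended to the existing keying-sets panel.
    self.layout.label(text="Appended by an add-on")


def register():
    bpy.utils.register_class(EXAMPLE_PT_scene_notes)
    bpy.types.SCENE_PT_keying_sets.append(_extra_keying_sets_draw)


def unregister():
    bpy.types.SCENE_PT_keying_sets.remove(_extra_keying_sets_draw)
    bpy.utils.unregister_class(EXAMPLE_PT_scene_notes)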
class SCENE_PT_rigid_body_cache(RigidBodySubPanel, SceneButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SCENE_PT_rigid_body_field_weights(RigidBodySubPanel, SceneButtonsPanel,
bpy_types.Panel, bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class SCENE_PT_rigid_body_world_settings(RigidBodySubPanel, SceneButtonsPanel,
bpy_types.Panel,
bpy_types._GenericUI):
bl_context = None
''' '''
bl_label = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
a watershed on the
distance-transformed thresholded image.
- *{WA_PROPAGATE}:* This method uses a propagation algorithm instead
of a watershed. The image is ignored and the pixels are assigned to
the objects by repeatedly adding unassigned pixels to the objects
that are immediately adjacent to them. This method is suited in cases
such as objects with branching extensions, for instance neurites,
where the goal is to trace outward from the cell body along the
branch, assigning pixels in the branch along the way. See the help
for the **IdentifySecondaryObjects** module for more details on this
method.
- *{WA_NONE}*: If objects are well separated and bright relative to
the background, it may be unnecessary to attempt to separate clumped
objects. Using the very fast *{WA_NONE}* option, a simple threshold
will be used to identify objects.
""".format(
**{
"WA_INTENSITY": WA_INTENSITY,
"WA_SHAPE": WA_SHAPE,
"WA_PROPAGATE": WA_PROPAGATE,
"WA_NONE": WA_NONE,
}
),
)
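# Illustrative sketch only, not CellProfiler's implementation: the intensity-
# and shape-based declumping described above, rebuilt from scipy/skimage
# primitives. `binary` (thresholded boolean image) and `intensity` (original
# grayscale image) are assumed inputs.
def _declump_sketch(binary, intensity, min_distance=7, use_shape=False):
    import numpy
    from scipy import ndimage
    from skimage.feature import peak_local_max
    from skimage.segmentation import watershed

    distance = ndimage.distance_transform_edt(binary)
    # Shape-based declumping seeds from peaks of the distance transform;
    # intensity-based declumping seeds from peaks of the intensity image.
    seed_image = distance if use_shape else intensity
    coords = peak_local_max(
        seed_image, min_distance=min_distance, labels=ndimage.label(binary)[0]
    )
    markers = numpy.zeros(binary.shape, dtype=int)
    markers[tuple(coords.T)] = numpy.arange(1, len(coords) + 1)
    # The watershed grows each seed outward; inverting the seed image makes
    # bright peaks (or distance maxima) the basins that are flooded first.
    return watershed(seed_image.max() - seed_image, markers, mask=binary)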
self.automatic_smoothing = Binary(
AUTOMATIC_SMOOTHING_SETTING_TEXT,
True,
doc="""\
*(Used only when distinguishing between clumped objects)*
Select "*{YES}*" to automatically calculate the amount of smoothing
applied to the image to assist in declumping. Select "*{NO}*" to
manually enter the smoothing filter size.
This setting, along with the *Minimum allowed distance between local
maxima* setting, affects whether objects close to each other are
considered a single object or multiple objects. It does not affect the
dividing lines between an object and the background.
Please note that this smoothing setting is applied after thresholding,
and is therefore distinct from the threshold smoothing method setting
above, which is applied *before* thresholding.
The size of the smoothing filter is automatically calculated based on
the *{SIZE_RANGE_SETTING_TEXT}* setting above. If you see too many
objects merged that ought to be separate or too many objects split up
that ought to be merged, you may want to override the automatically
calculated value.""".format(
**{
"YES": "Yes",
"NO": "No",
"SIZE_RANGE_SETTING_TEXT": SIZE_RANGE_SETTING_TEXT,
}
),
)
self.smoothing_filter_size = Integer(
SMOOTHING_FILTER_SIZE_SETTING_TEXT,
10,
doc="""\
*(Used only when distinguishing between clumped objects)*
If you see too many objects merged that ought to be separated
(under-segmentation), this value should be lower. If you see too many
objects split up that ought to be merged (over-segmentation), the
value should be higher.
Note that splitting and merging are also
affected by your choice for the
*{AUTOMATIC_MAXIMA_SUPPRESSION_SETTING_TEXT}* setting. It is an art to balance
these two settings; read the help carefully for both.
Reducing the texture of objects by increasing the smoothing increases
the chance that each real, distinct object has only one peak of
intensity but also increases the chance that two distinct objects will
be recognized as only one object. Note that increasing the size of the
smoothing filter increases the processing time exponentially.
Enter 0 to prevent any image smoothing in certain cases; for example,
for low resolution images with small objects ( < ~5 pixels in
diameter).
""".format(
**{
"AUTOMATIC_MAXIMA_SUPPRESSION_SETTING_TEXT": AUTOMATIC_MAXIMA_SUPPRESSION_SETTING_TEXT
}
),
)
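# Sketch (an assumption, not the module's exact formula): the declumping
# smoothing is a Gaussian blur whose sigma is derived from the filter size
# via the usual FWHM factor of roughly 2.35.
def _smooth_for_declumping_sketch(image, filter_size):
    from scipy import ndimage
    if filter_size <= 0:
        # A filter size of 0 disables smoothing, as described above.
        return image
    return ndimage.gaussian_filter(image, sigma=filter_size / 2.35)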
self.automatic_suppression = Binary(
AUTOMATIC_MAXIMA_SUPPRESSION_SETTING_TEXT,
True,
doc="""\
*(Used only when distinguishing between clumped objects)*
Select "*{YES}*" to automatically calculate the distance between
intensity maxima to assist in declumping. Select "*{NO}*" to manually
enter the permissible maxima distance.
This setting, along with the *{SMOOTHING_FILTER_SIZE_SETTING_TEXT}*
setting, affects whether objects close to each other are considered a
single object or multiple objects. It does not affect the dividing lines
between an object and the background. Local maxima that are closer
together than the minimum allowed distance will be suppressed (the local
intensity histogram is smoothed to remove the peaks within that
distance).
The distance can be automatically calculated based on the
minimum entered for the *{SIZE_RANGE_SETTING_TEXT}* setting above,
but if you see too many objects merged that ought to be separate, or too
many objects split up that ought to be merged, you may want to override
the automatically calculated value.""".format(
**{
"YES": "Yes",
"NO": "No",
"SMOOTHING_FILTER_SIZE_SETTING_TEXT": SMOOTHING_FILTER_SIZE_SETTING_TEXT,
"SIZE_RANGE_SETTING_TEXT": SIZE_RANGE_SETTING_TEXT,
}
),
)
self.maxima_suppression_size = Float(
"Suppress local maxima that are closer than this minimum allowed distance",
7,
minval=0,
doc="""\
*(Used only when distinguishing between clumped objects)*
Enter a positive integer, in pixel units. If you see too many objects
merged that ought to be separated (under-segmentation), the value
should be lower. If you see too many objects split up that ought to be
merged (over-segmentation), the value should be higher.
The maxima suppression distance should be set to be roughly equivalent
to the radius of the smallest object of interest that you would expect
to see in the experiment. Any distinct
“objects” that are found but are within two times this distance from
each other will be assumed to be actually two lumpy parts of the same
object, and they will be merged.
Note that splitting and merging are also
affected by your choice for the
*{SMOOTHING_FILTER_SIZE_SETTING_TEXT}* setting. It is an art to balance
these two settings; read the help carefully for both.
""".format(
**{
"SMOOTHING_FILTER_SIZE_SETTING_TEXT": SMOOTHING_FILTER_SIZE_SETTING_TEXT
}
),
)
self.low_res_maxima = Binary(
"Speed up by using lower-resolution image to find local maxima?",
True,
doc="""\
*(Used only when distinguishing between clumped objects)*
Select "*{YES}*" to down-sample the image for declumping. This can be
helpful for saving processing time on large images.
Note that if you have entered a minimum object diameter of 10 or less,
checking this box will have no effect.""".format(
**{"YES": "Yes"}
),
)
self.fill_holes = Choice(
"Fill holes in identified objects?",
FH_ALL,
value=FH_THRESHOLDING,
doc="""\
This option controls how holes (regions of background surrounded by one
or more objects) are filled in:
- *{FH_THRESHOLDING}:* Fill in holes that are smaller than
the maximum object size prior to declumping, and fill in any remaining
holes after declumping.
- *{FH_DECLUMP}:* Fill in holes located within identified
objects after declumping.
- *{FH_NEVER}:* Leave holes within objects.
Please note that if an object is located within a hole and
this option is enabled, the object will be lost when the hole is
filled in.""".format(
**{
"FH_THRESHOLDING": FH_THRESHOLDING,
"FH_DECLUMP": FH_DECLUMP,
"FH_NEVER": FH_NEVER,
}
),
)
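# Sketch (not the module's code) of the post-declumping hole filling
# described above, applied per label so each hole keeps its owner's label.
def _fill_holes_sketch(labels, mode=FH_DECLUMP):
    from scipy import ndimage
    if mode == FH_NEVER:
        return labels
    filled = labels.copy()
    for value in range(1, int(labels.max()) + 1):
        hole_mask = ndimage.binary_fill_holes(labels == value) & (filled == 0)
        filled[hole_mask] = value
    return filled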
self.limit_choice = Choice(
"Handling of objects if excessive number of objects identified",
[LIMIT_NONE, LIMIT_ERASE],
doc="""\
This setting deals with images that are segmented into an unreasonable
number of objects. This might happen if the module calculates a low
threshold or if the image has unusual artifacts.
**IdentifyPrimaryObjects** can handle this condition in one of two
ways:
- *{LIMIT_NONE}*: Continue processing regardless if large numbers of
objects are found.
- *{LIMIT_ERASE}*: Erase all objects if the number of objects exceeds
the maximum. This results in an image with no primary objects. This
option is a good choice if a large number of objects indicates that
the image should not be processed; it can save a lot of time in
subsequent **Measure** modules.""".format(
**{"LIMIT_NONE": LIMIT_NONE, "LIMIT_ERASE": LIMIT_ERASE}
),
)
self.maximum_object_count = Integer(
"Maximum number of objects",
value=500,
minval=2,
doc="""\
*(Used only when handling images with large numbers of objects by
erasing)*
This setting limits the number of objects in the image. See the
documentation for the previous setting for details.""",
)
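# Sketch of the erase behaviour described above: when the segmentation
# produces more objects than allowed, the whole label image is discarded.
def _apply_object_count_limit_sketch(labels, maximum_object_count):
    if int(labels.max()) > maximum_object_count:
        return labels * 0  # erase every object, keeping shape and dtype
    return labels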
self.want_plot_maxima = Binary(
"Display accepted local maxima?",
False,
doc="""\
*(Used only when distinguishing between clumped objects)*
Select "*{YES}*" to display detected local maxima on the object outlines plot. This can be
helpful for fine-tuning segmentation parameters.
Local maxima are small cluster of pixels from which objects are 'grown' during segmentation.
Each object in a declumped segmentation will have a single maxima.
For example, for intensity-based declumping, maxima should appear at the brightest points in an object.
If obvious intensity peaks are missing they were probably removed by the filters set above.""".format(
**{"YES": "Yes"}
),
)
self.maxima_color = Color(
"Select maxima color",
DEFAULT_MAXIMA_COLOR,
doc="Maxima will be displayed in this color.",
)
self.use_advanced = Binary(
"Use advanced settings?",
value=False,
doc="""\
Select "*{YES}*" to use advanced module settings.
If "*{NO}*" is selected, the following settings are used:
- *{THRESHOLD_SCOPE_TEXT}*: {THRESHOLD_SCOPE_VALUE}
- *{THRESHOLD_METHOD_TEXT}*: {THRESHOLD_METHOD_VALUE}
- *{THRESHOLD_SMOOTHING_SCALE_TEXT}*:
{THRESHOLD_SMOOTHING_SCALE_VALUE} (sigma = 1)
- *{THRESHOLD_CORRECTION_FACTOR_TEXT}*:
{THRESHOLD_CORRECTION_FACTOR_VALUE}
- *{THRESHOLD_RANGE_TEXT}*: minimum {THRESHOLD_RANGE_MIN}, maximum
{THRESHOLD_RANGE_MAX}
- *{UNCLUMP_METHOD_TEXT}*: {UNCLUMP_METHOD_VALUE}
- *{WATERSHED_METHOD_TEXT}*: {WATERSHED_METHOD_VALUE}
- *{AUTOMATIC_SMOOTHING_TEXT}*: *{YES}*
- *{AUTOMATIC_SUPPRESSION_TEXT}*: *{YES}*
- *{LOW_RES_MAXIMA_TEXT}*: *{YES}*
- *{FILL_HOLES_TEXT}*: {FILL_HOLES_VALUE}
- *{LIMIT_CHOICE_TEXT}*: {LIMIT_CHOICE_VALUE}""".format(
**{
"AUTOMATIC_SMOOTHING_TEXT": self.automatic_smoothing.get_text(),
"AUTOMATIC_SUPPRESSION_TEXT": self.automatic_suppression.get_text(),
"FILL_HOLES_TEXT": self.fill_holes.get_text(),
"FILL_HOLES_VALUE": FH_THRESHOLDING,
"LIMIT_CHOICE_TEXT": self.limit_choice.get_text(),
"LIMIT_CHOICE_VALUE": LIMIT_NONE,
"LOW_RES_MAXIMA_TEXT": self.low_res_maxima.get_text(),
"NO": "No",
"THRESHOLD_CORRECTION_FACTOR_TEXT": self.threshold.threshold_correction_factor.get_text(),
"THRESHOLD_CORRECTION_FACTOR_VALUE": 1.0,
"THRESHOLD_METHOD_TEXT": self.threshold.global_operation.get_text(),
"THRESHOLD_METHOD_VALUE": threshold.TM_LI,
"THRESHOLD_RANGE_MAX": 1.0,
"THRESHOLD_RANGE_MIN": 0.0,
"THRESHOLD_RANGE_TEXT": self.threshold.threshold_range.get_text(),
"THRESHOLD_SCOPE_TEXT": self.threshold.threshold_scope.get_text(),
"THRESHOLD_SCOPE_VALUE": threshold.TS_GLOBAL,
"THRESHOLD_SMOOTHING_SCALE_TEXT": self.threshold.threshold_smoothing_scale.get_text(),
"THRESHOLD_SMOOTHING_SCALE_VALUE": 1.3488,
"UNCLUMP_METHOD_TEXT": self.unclump_method.get_text(),
"UNCLUMP_METHOD_VALUE": UN_INTENSITY,
"WATERSHED_METHOD_TEXT": self.watershed_method.get_text(),
"WATERSHED_METHOD_VALUE": WA_INTENSITY,
"YES": "Yes",
}
),
)
self.threshold_setting_version = Integer(
"Threshold setting version", value=self.threshold.variable_revision_number
)
self.threshold.create_settings()
self.threshold.threshold_smoothing_scale.value = 1.3488  # sigma = 1
# $Id: dns.py 27 2006-11-21 01:22:52Z dahelder $
# -*- coding: utf-8 -*-
"""Domain Name System."""
from __future__ import print_function
from __future__ import absolute_import
import struct
import codecs
from . import dpkt
from .compat import compat_ord
DNS_Q = 0
DNS_R = 1
# Opcodes
DNS_QUERY = 0
DNS_IQUERY = 1
DNS_STATUS = 2
DNS_NOTIFY = 4
DNS_UPDATE = 5
# Flags
DNS_CD = 0x0010 # checking disabled
DNS_AD = 0x0020 # authenticated data
DNS_Z = 0x0040 # unused
DNS_RA = 0x0080 # recursion available
DNS_RD = 0x0100 # recursion desired
DNS_TC = 0x0200 # truncated
DNS_AA = 0x0400 # authoritative answer
DNS_QR = 0x8000 # response ( query / response )
# Response codes
DNS_RCODE_NOERR = 0
DNS_RCODE_FORMERR = 1
DNS_RCODE_SERVFAIL = 2
DNS_RCODE_NXDOMAIN = 3
DNS_RCODE_NOTIMP = 4
DNS_RCODE_REFUSED = 5
DNS_RCODE_YXDOMAIN = 6
DNS_RCODE_YXRRSET = 7
DNS_RCODE_NXRRSET = 8
DNS_RCODE_NOTAUTH = 9
DNS_RCODE_NOTZONE = 10
# RR types
DNS_A = 1
DNS_NS = 2
DNS_CNAME = 5
DNS_SOA = 6
DNS_NULL = 10
DNS_PTR = 12
DNS_HINFO = 13
DNS_MX = 15
DNS_TXT = 16
DNS_AAAA = 28
DNS_SRV = 33
DNS_OPT = 41
# RR classes
DNS_IN = 1
DNS_CHAOS = 3
DNS_HESIOD = 4
DNS_ANY = 255
def pack_name(name, off, label_ptrs):
name = codecs.encode(name, 'utf-8')
if name:
labels = name.split(b'.')
else:
labels = []
labels.append(b'')
buf = b''
for i, label in enumerate(labels):
key = b'.'.join(labels[i:]).upper()
ptr = label_ptrs.get(key)
if not ptr:
if len(key) > 1:
ptr = off + len(buf)
if ptr < 0xc000:
label_ptrs[key] = ptr
i = len(label)
buf += struct.pack("B", i) + label
else:
buf += struct.pack('>H', (0xc000 | ptr))
break
return buf
def unpack_name(buf, off):
name = []
saved_off = 0
start_off = off
name_length = 0
while True:
if off >= len(buf):
raise dpkt.NeedData()
n = compat_ord(buf[off])
if n == 0:
off += 1
break
elif (n & 0xc0) == 0xc0:
ptr = struct.unpack('>H', buf[off:off + 2])[0] & 0x3fff
if ptr >= start_off:
raise dpkt.UnpackError('Invalid label compression pointer')
off += 2
if not saved_off:
saved_off = off
start_off = off = ptr
elif (n & 0xc0) == 0x00:
off += 1
name.append(buf[off:off + n])
name_length += n + 1
if name_length > 255:
raise dpkt.UnpackError('name longer than 255 bytes')
off += n
else:
raise dpkt.UnpackError('Invalid label length %02x' % n)
if not saved_off:
saved_off = off
return codecs.decode(b'.'.join(name), 'utf-8'), saved_off
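# Quick round-trip sketch of the two helpers above (illustrative only): pack a
# name at offset 12 (just past the fixed DNS header), then unpack it again.
def _name_roundtrip_sketch():
    label_ptrs = {}
    packed = pack_name('www.example.com', 12, label_ptrs)
    # label_ptrs now maps upper-cased suffixes (b'WWW.EXAMPLE.COM', ...) to
    # their offsets, so later names can be emitted as compression pointers.
    name, next_off = unpack_name(b'\x00' * 12 + packed, 12)
    assert name == 'www.example.com'
    return packed, label_ptrs, next_off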
class DNS(dpkt.Packet):
"""Domain Name System.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of DNS.
TODO.
"""
__hdr__ = (
('id', 'H', 0),
('op', 'H', DNS_RD), # recursive query
# XXX - lists of query, RR objects
('qd', 'H', []),
('an', 'H', []),
('ns', 'H', []),
('ar', 'H', [])
)
@property
def qr(self):
return int((self.op & DNS_QR) == DNS_QR)
@qr.setter
def qr(self, v):
if v:
self.op |= DNS_QR
else:
self.op &= ~DNS_QR
@property
def opcode(self):
return (self.op >> 11) & 0xf
@opcode.setter
def opcode(self, v):
self.op = (self.op & ~0x7800) | ((v & 0xf) << 11)
@property
def aa(self):
return int((self.op & DNS_AA) == DNS_AA)
@aa.setter
def aa(self, v):
if v:
self.op |= DNS_AA
else:
self.op &= ~DNS_AA
@property
def tc(self):
return int((self.op & DNS_TC) == DNS_TC)
@tc.setter
def tc(self, v):
if v:
self.op |= DNS_TC
else:
self.op &= ~DNS_TC
@property
def rd(self):
return int((self.op & DNS_RD) == DNS_RD)
@rd.setter
def rd(self, v):
if v:
self.op |= DNS_RD
else:
self.op &= ~DNS_RD
@property
def ra(self):
return int((self.op & DNS_RA) == DNS_RA)
@ra.setter
def ra(self, v):
if v:
self.op |= DNS_RA
else:
self.op &= ~DNS_RA
@property
def zero(self):
return int((self.op & DNS_Z) == DNS_Z)
@zero.setter
def zero(self, v):
if v:
self.op |= DNS_Z
else:
self.op &= ~DNS_Z
@property
def rcode(self):
return self.op & 0xf
@rcode.setter
def rcode(self, v):
self.op = (self.op & ~0xf) | (v & 0xf)
class Q(dpkt.Packet):
"""DNS question."""
__hdr__ = (
('name', '1025s', b''),
('type', 'H', DNS_A),
('cls', 'H', DNS_IN)
)
# XXX - suk
def __len__(self):
raise NotImplementedError
__str__ = __len__
def unpack(self, buf):
raise NotImplementedError
class RR(Q):
"""DNS resource record."""
__hdr__ = (
('name', '1025s', b''),
('type', 'H', DNS_A),
('cls', 'H', DNS_IN),
('ttl', 'I', 0),
('rlen', 'H', 4),
('rdata', 's', b'')
)
def pack_rdata(self, off, label_ptrs):
# XXX - yeah, this sux
if self.rdata:
return self.rdata
if self.type == DNS_A:
return self.ip
elif self.type == DNS_NS:
return pack_name(self.nsname, off, label_ptrs)
elif self.type == DNS_CNAME:
return pack_name(self.cname, off, label_ptrs)
elif self.type == DNS_PTR:
return pack_name(self.ptrname, off, label_ptrs)
elif self.type == DNS_SOA:
l_ = []
l_.append(pack_name(self.mname, off, label_ptrs))
l_.append(pack_name(self.rname, off + len(l_[0]), label_ptrs))
l_.append(struct.pack('>IIIII', self.serial, self.refresh,
self.retry, self.expire, self.minimum))
return b''.join(l_)
elif self.type == DNS_MX:
return struct.pack('>H', self.preference) + \
pack_name(self.mxname, off + 2, label_ptrs)
elif self.type == DNS_TXT or self.type == DNS_HINFO:
return b''.join(struct.pack('B', len(x)) + x for x in self.text)
elif self.type == DNS_AAAA:
return self.ip6
elif self.type == DNS_SRV:
return struct.pack('>HHH', self.priority, self.weight, self.port) + \
pack_name(self.srvname, off + 6, label_ptrs)
elif self.type == DNS_OPT:
return b'' # self.rdata
else:
raise dpkt.PackError('RR type %s is not supported' % self.type)
def unpack_rdata(self, buf, off):
if self.type == DNS_A:
self.ip = self.rdata
elif self.type == DNS_NS:
self.nsname, off = unpack_name(buf, off)
elif self.type == DNS_CNAME:
self.cname, off = unpack_name(buf, off)
elif self.type == DNS_PTR:
self.ptrname, off = unpack_name(buf, off)
elif self.type == DNS_SOA:
self.mname, off = unpack_name(buf, off)
self.rname, off = unpack_name(buf, off)
self.serial, self.refresh, self.retry, self.expire, self.minimum = \
struct.unpack('>IIIII', buf[off:off + 20])
elif self.type == DNS_MX:
self.preference = struct.unpack('>H', self.rdata[:2])
self.mxname, off = unpack_name(buf, off + 2)
elif self.type == DNS_TXT or self.type == DNS_HINFO:
self.text = []
buf = self.rdata
while buf:
n = compat_ord(buf[0])
self.text.append(codecs.decode(buf[1:1 + n], 'utf-8'))
buf = buf[1 + n:]
elif self.type == DNS_AAAA:
self.ip6 = self.rdata
elif self.type == DNS_NULL:
self.null = codecs.encode(self.rdata, 'hex')
elif self.type == DNS_SRV:
self.priority, self.weight, self.port = struct.unpack('>HHH', self.rdata[:6])
self.srvname, off = unpack_name(buf, off + 6)
elif self.type == DNS_OPT:
pass # RFC-6891: OPT is a pseudo-RR not carrying any DNS data
else:
raise dpkt.UnpackError('RR type %s is not supported' % self.type)
def pack_q(self, buf, q):
"""Append packed DNS question and return buf."""
return buf + pack_name(q.name, len(buf), self.label_ptrs) + struct.pack('>HH', q.type, q.cls)
def unpack_q(self, buf, off):
"""Return DNS question and new offset."""
q = self.Q()
q.name, off = unpack_name(buf, off)
q.type, q.cls = struct.unpack('>HH', buf[off:off + 4])
off += 4
return q, off
def pack_rr(self, buf, rr):
"""Append packed DNS RR and return buf."""
name = pack_name(rr.name, len(buf), self.label_ptrs)
rdata = rr.pack_rdata(len(buf) + len(name) + 10, self.label_ptrs)
return buf + name + struct.pack('>HHIH', rr.type, rr.cls, rr.ttl, len(rdata)) + rdata
def unpack_rr(self, buf, off):
"""Return DNS RR and new offset."""
rr = self.RR()
rr.name, off = unpack_name(buf, off)
rr.type, rr.cls, rr.ttl, rdlen = struct.unpack('>HHIH', buf[off:off + 10])
off += 10
rr.rdata = buf[off:off + rdlen]
rr.rlen = rdlen
rr.unpack_rdata(buf, off)
off += rdlen
return rr, off
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
off = self.__hdr_len__
cnt = self.qd # FIXME: This relies on this being properly set somewhere else
self.qd = []
for _ in range(cnt):
q, off = self.unpack_q(buf, off)
self.qd.append(q)
for x in ('an', 'ns', 'ar'):
cnt = getattr(self, x, 0)
setattr(self, x, [])
for _ in range(cnt):
rr, off = self.unpack_rr(buf, off)
getattr(self, x).append(rr)
self.data = b''
def __len__(self):
# XXX - cop out
return len(bytes(self))
def __bytes__(self):
# XXX - compress names on the fly
self.label_ptrs = {}
buf = struct.pack(self.__hdr_fmt__, self.id, self.op, len(self.qd),
len(self.an), len(self.ns), len(self.ar))
for q in self.qd:
buf = self.pack_q(buf, q)
for x in ('an', 'ns', 'ar'):
for rr in getattr(self, x):
buf = self.pack_rr(buf, rr)
del self.label_ptrs
return buf
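# Illustrative usage sketch (hand-written example, using only the classes and
# constants defined above): build a recursive A query, serialize it, and
# parse it back.
def _dns_usage_sketch():
    query = DNS(id=0x1234)                      # op defaults to DNS_RD
    query.qd = [DNS.Q(name='example.com', type=DNS_A)]
    wire = bytes(query)                         # header + question section

    parsed = DNS(wire)
    assert parsed.qr == DNS_Q and parsed.rd == 1
    assert parsed.qd[0].name == 'example.com'
    return wire, parsed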
# TESTS
def define_testdata():
"""
Reference test data is stored in the dynamically defined class.
It is created in this way so that we can import unhexlify only during
testing, and not during normal use.
"""
from binascii import unhexlify
class TestData(object):
a_resp = unhexlify(
"059c8180000100010000000106676f6f676c6503636f6d0000010001c00c00010"
"0010000012b0004d83ace2e0000290200000000000000"
)
aaaa_resp = unhexlify(
"7f228180000100010000000005676d61696c03636f6d00001c0001c00c001c000"
"10000012b00102a001450400908020000000000002005"
)
cname_resp = unhexlify(
"a154818000010001000000000377777705676d61696c03636f6d0000010001c00"
"c000500010000545f000e046d61696c06676f6f676c65c016"
)
invalid_rr = unhexlify(
"000001000000000100000000046e616d650000150001000000000000"
)
mx_resp = unhexlify(
"053b8180000100010000000006676f6f676c6503636f6d00000f0001c00c000f0"
"001000002570011001e04616c7432056173706d78016cc00c"
)
null_resp = unhexlify(
"12b0840000010001000000000b626c6168626c616836363606706972617465037"
"3656100000a0001c00c000a00010000000000095641434b4403c5e901"
)
opt_resp = unhexlify(
"8d6e0110000100000000000104783131310678787878313106616b616d6169036"
"e657400000100010000290fa0000080000000"
)
ptr_resp = unhexlify(
"67028180000100010003000001310131033231310331343107696e2d616464720"
"46172706100000c0001c00c000c000100000d3600240764656661756c740a762d"
"756d63652d69667305756d6e657405756d6963680365647500c00e00020001000"
"00d36000d0673686162627903696673c04fc00e0002000100000d36000f0c6669"
"73682d6c6963656e7365c06dc00e0002000100000d36000b04646e73320369746"
"4c04f"
)
soa_resp = unhexlify(
"851f8180000100010000000006676f6f676c6503636f6d0000060001c00c00060"
"001000000230026036e7332c00c09646e732d61646d696ec00c0a747447000003"
"8400000384000007080000003c"
)
srv_resp = unhexlify(
"7f2281800001000100000000075f6a6162626572045f746370066a61626265720"
"3636f6d0000210001c00c0021000100000e0f001a000a000014950764656e6a61"
"6232066a616262657203636f6d00"
)
# src/exomole/read_def.py
"""Module grouping some data-classes and the parser for reading and parsing the
ExoMol *.def* files.
"""
import warnings
from pathlib import Path
from pyvalem.formula import Formula, FormulaParseError
from .exceptions import (
LineValueError,
LineCommentError,
LineWarning,
DefParseError,
DefConsistencyError,
)
from .utils import (
get_file_raw_text_over_api,
parse_exomol_line,
get_num_columns,
DataClass,
)
# noinspection PyUnresolvedReferences
class Isotope(DataClass):
"""A data class representing isotope instances.
All the parameters passed are stored as instance attributes.
Parameters
----------
number : int
element_symbol : str
"""
def __init__(self, number, element_symbol):
super().__init__(number=number, element_symbol=element_symbol)
def __repr__(self):
return f"Isotope({self.number}{self.element_symbol})"
class IrreducibleRepresentation(DataClass):
"""A data class representing instances of irreducible representations.
All the parameters passed are stored as instance attributes.
Parameters
----------
ir_id : str
ID of the *irreducible representation*
label : str
nuclear_spin_degeneracy : int
"""
def __init__(self, ir_id, label, nuclear_spin_degeneracy):
super().__init__(
id=ir_id, label=label, nuclear_spin_degeneracy=nuclear_spin_degeneracy
)
class QuantumCase(DataClass):
"""A data class representing the quantum case instances.
All the parameters passed are stored as instance attributes.
Parameters
----------
label : str
"""
def __init__(self, label):
super().__init__(label=label)
# noinspection PyUnresolvedReferences
class Quantum(DataClass):
"""A data class representing the quanta instances.
All the parameters passed are stored as instance attributes.
Parameters
----------
label : str
q_format : str
The quantum format as specified by the *.def* file
description : str
"""
def __init__(self, label, q_format, description):
super().__init__(label=label, format=q_format, description=description)
def __repr__(self):
return f"Quantum({self.label})"
class DefParser:
"""Class handling parsing of any particular *.def* file.
Parses the *.def* file specified either by the `path` argument passed and leading to
the *.def* file on the local file system, or by the trio of `molecule_slug`,
`isotopologue_slug` and `dataset_name` arguments, in which case the *.def* file
is requested via the ExoMol public API.
Instantiating the class only saves the `raw_text` attribute, which can be parsed
with the `parse` method into all the available info. All the *relevant* attributes
are listed in the **Attributes** section.
Parameters
----------
path : str or Path, optional
The path leading to the *.def* file. If supplied, all the other arguments are
simply ignored.
molecule_slug : str, optional
Only required, if the `path` argument is not passed.
isotopologue_slug : str, optional
Only required, if the `path` argument is not passed.
dataset_name : str, optional
Only required, if the `path` argument is not passed.
Attributes
----------
raw_text : str
file_name : str
version : int
iso_formula : str
iso_slug : str
isotopes : list of Isotope
lifetime_availability : bool
lande_factor_availability : bool
quanta : list of Quantum
Raises
------
APIError
If `path` not passed and the ExoMol API request call results in an unsuccessful
response.
Notes
-----
See the ExoMol file standard as defined in the **ExoMol release paper** [1]_.
References
----------
.. [1] <NAME>, et al. The ExoMol database: molecular line lists for
exoplanet and other hot atmospheres. J Mol Spectrosc 2016;327:73–94.
doi: 10.1016/j.jms.2016.05.002
Examples
--------
Instantiate the parser:
>>> parser = DefParser(
... path="tests/resources/exomol_data/CaH/40Ca-1H/Yadin/40Ca-1H__Yadin.def"
... )
>>> parser.file_name
'40Ca-1H__Yadin.def'
>>> parser.raw_text[:10] # first 10 characters of the text
'EXOMOL.def'
Parse the .def file:
>>> parser.parse(warn_on_comments=True)
>>> parser.id
'EXOMOL.def'
>>> parser.iso_formula
'(40Ca)(1H)'
>>> parser.mass
40.970416
>>> parser.isotopes
[Isotope(40Ca), Isotope(1H)]
>>> quanta = parser.quanta
>>> quanta
[Quantum(par), Quantum(v), Quantum(N), Quantum(e/f)]
>>> quanta[0].description
"total parity: '+' or '-'"
>>> parser.lifetime_availability, parser.lande_factor_availability
(True, False)
Additional methods on the parsed data:
>>> parser.get_quanta_labels()
['par', 'v', 'N', 'e/f']
>>> # with parser.lifetime_availability, we expect 9 columns in the .states file
>>> parser.get_states_header()
['i', 'E', 'g_tot', 'J', 'tau', 'par', 'v', 'N', 'e/f']
"""
def __init__(
self,
path=None,
molecule_slug=None,
isotopologue_slug=None,
dataset_name=None,
):
self.local = path is not None
self.path = Path(path) if path is not None else None
self.raw_text = None
self.file_name = None
self._save_raw_text(path, molecule_slug, isotopologue_slug, dataset_name)
# placeholders to all the attributes
self.id = None
self.iso_formula = None
self.iso_slug = None
self.dataset_name = None
self.version = None
self.inchi_key = None
self.isotopes = None
self.mass = None
self.symmetry_group = None
self.irreducible_representations = None
self.max_temp = None
self.num_pressure_broadeners = None
self.dipole_availability = None
self.num_cross_sections = None
self.num_k_coefficients = None
self.lifetime_availability = None
self.lande_factor_availability = None
self.num_states = None
self.quanta_cases = None
self.quanta = None
self.num_transitions = None
self.num_trans_files = None
self.max_wavenumber = None
self.high_energy_complete = None
self.parsed = False
def _save_raw_text(self, path, molecule_slug, isotopologue_slug, dataset_name):
"""Save the raw text of a *.def* file as an instance attribute
The *.def* file is either read from the local file system, or requested over the
ExoMol public API, based on the attributes values.
Parameters
----------
path : str or Path, optional
Path leading to the *.def* file. If supplied, all the other arguments are
ignored.
molecule_slug : str, optional
Ignored if `path` is passed.
isotopologue_slug : str or None
Ignored if `path` is passed.
dataset_name : str, optional
Ignored if `path` is passed.
"""
if path is None:
self.raw_text = get_file_raw_text_over_api(
"def", molecule_slug, isotopologue_slug, dataset_name
)
self.file_name = f"{isotopologue_slug}__{dataset_name}.def"
else:
with open(path, "r") as fp:
self.raw_text = fp.read()
self.file_name = Path(path).name
def parse(self, warn_on_comments=True):
"""Parse the *.def* file text from the `raw_text` attribute.
Populates all the instance attributes incrementally, util it hits the end of
the file, or one of the exceptions is raised, signaling inconsistent *.def*
file.
Parameters
----------
warn_on_comments : bool
If ``True``, the comments behind the ``#`` symbol on each line are checked
against some expected comments (hard-coded in the method) and the
`LineWarning` is raised if they do not match.
Raises
-------
DefParseError
Raised if value on any line cannot be cast to the expected ``type``, or if
the parser runs out of lines. This error signals an inconsistent *.def*
file. Also raised when any other inconsistencies are detected, such as
formula not supported by the `PyValem` package, etc.
Warns
-----
LineWarning
Raised if `warns_on_comments` is ``True`` and if the comment on any line
does not match the expected text hard-coded in this method.
Also raised if any empty line is present in the .def file, or of the
number of isotope sections does not match the number of atoms in the
isotopologue.
Warnings
--------
Currently the parser stops after the *High Energy Complete* line and does not
parse the rest of the *.def* file, as the info beyond this point in the *.def*
file was not needed for the data product application which served as my
motivation to write this package.
"""
lines = self.raw_text.split("\n")
n_orig = len(lines)
def parse_line(expected_comment, val_type=None, local_lines=lines):
return parse_exomol_line(
local_lines,
n_orig,
expected_comment=expected_comment,
file_name=self.file_name,
val_type=val_type,
warn_on_comments=warn_on_comments,
)
# catch all the parse_line-originated errors and wrap them in a higher-level
# error:
try:
self.id = parse_line("ID")
if self.id != "EXOMOL.def":
raise DefParseError(f"Unexpected ID in {self.file_name}")
self.iso_formula = parse_line("IsoFormula")
self.iso_slug = parse_line("Iso-slug")
self.dataset_name = parse_line("Isotopologue dataset name")
self.version = parse_line("Version number with format YYYYMMDD", int)
self.inchi_key = parse_line("Inchi key of molecule")
self.isotopes = []
num_atoms = parse_line("Number of atoms", int)
try:
formula = Formula(self.iso_formula)
except FormulaParseError as e:
raise DefParseError(f"{str(e)} (raised in {self.file_name})")
if formula.natoms != num_atoms:
raise DefParseError(f"Incorrect number of atoms in {self.file_name}")
# many (probably all) .def files for polyatomic datasets actually do not
# list all isotopes, but rather only all *distinct* isotopes.
# I'll handle this with a Warning.
num_distinct_atoms = len(formula.atoms)
def add_isotope(num, el_symbol):
try:
Formula(f"({num}{el_symbol})")
except FormulaParseError as exc:
raise DefParseError(f"{str(exc)} (raised in {self.file_name})")
isotope = Isotope(number=num, element_symbol=el_symbol)
self.isotopes.append(isotope)
i = 0
for i in range(num_distinct_atoms):
number = parse_line(f"Isotope number {i + 1}", int)
element_symbol = parse_line(f"Element symbol {i + 1}")
add_isotope(number, element_symbol)
if num_distinct_atoms < num_atoms:
num_additional_isotopes_expected = num_atoms - num_distinct_atoms
lines_clone = lines.copy()
try:
for j in range(i + 1, i + 1 + num_additional_isotopes_expected):
number = parse_line(
f"Isotope number {j + 1}", int, local_lines=lines_clone
)
element_symbol = parse_line(
f"Element symbol {j + 1}", local_lines=lines_clone
)
add_isotope(number, element_symbol)
except (LineValueError, LineCommentError):
# This means that the .def file lists only distinct isotopes, not
# all isotopes, as it should. Handle with Warning and continue with
# the original lines
warnings.warn(
f"Incorrect number of isotopes listed in {self.file_name}",
LineWarning,
)
else:
# This means that the try clause did not raise anything,
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgthreadedterrain"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import OpenThreads
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgTerrain
from osgpypp import osgText
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osgthreadedterrain.cpp'
# OpenSceneGraph example, osgterrain.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <OpenThreads/Block>
#include <osg/Group>
#include <osg/Geode>
#include <osg/ShapeDrawable>
#include <osg/Texture2D>
#include <osg/PositionAttitudeTransform>
#include <osg/MatrixTransform>
#include <osg/CoordinateSystemNode>
#include <osg/ClusterCullingCallback>
#include <osg/ArgumentParser>
#include <osgDB/FileUtils>
#include <osgDB/fstream>
#include <osgDB/ReadFile>
#include <osgUtil/IncrementalCompileOperation>
#include <osgText/FadeText>
#include <osgViewer/Viewer>
#include <osgViewer/ViewerEventHandlers>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgGA/KeySwitchMatrixManipulator>
#include <osgGA/StateSetManipulator>
#include <osgGA/AnimationPathManipulator>
#include <osgGA/TerrainManipulator>
#include <osgTerrain/TerrainTile>
#include <osgTerrain/GeometryTechnique>
#include <osgTerrain/Layer>
#include <iostream>
typedef std.vector< osg.GraphicsThread > GraphicsThreads
class ReleaseBlockOnCompileCompleted (osgUtil.IncrementalCompileOperation.CompileCompletedCallback) :
ReleaseBlockOnCompileCompleted(osg.RefBlockCount* block):
_block(block)
def compileCompleted(compileSet):
if _block.valid() : _block.completed()
# tell IncrementalCompileOperation that it's now safe to remove the compileSet
osg.notify(osg.NOTICE), "compileCompleted(", compileSet, ")"
return True
_block = osg.RefBlockCount()
class LoadAndCompileOperation (osg.Operation) :
LoadAndCompileOperation( str filename, osgUtil.IncrementalCompileOperation* ico , osg.RefBlockCount* block):
Operation("Load and compile Operation", False),
_filename(filename),
_incrementalCompileOperation(ico),
_block(block)
virtual void operator () (osg.Object* object)
# osg.notify(osg.NOTICE), "LoadAndCompileOperation ", _filename
_loadedModel = osgDB.readNodeFile(_filename)
if _loadedModel.valid() and _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(_loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_block)
_incrementalCompileOperation.add(compileSet)
else:
if _block.valid() : _block.completed()
# osg.notify(osg.NOTICE), "done LoadAndCompileOperation ", _filename
_filename = str()
_loadedModel = osg.Node()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_block = osg.RefBlockCount()
class MasterOperation (osg.Operation) :
typedef std.set<str> Files
typedef std.map<str, osg.Node > FilenameNodeMap
typedef std.vector< osg.Node > Nodes
MasterOperation( str filename, osgUtil.IncrementalCompileOperation* ico):
Operation("Master reading operation",True),
_filename(filename),
_incrementalCompileOperation(ico)
#* Set the OperationQueue that the MasterOperation can use to place tasks like file loading on for other processes to handle.
def setOperationQueue(oq):
_operationQueue = oq
def getOperationQueue():
return _operationQueue
def readMasterFile(files):
fin = osgDB.ifstream(_filename.c_str())
if fin :
fr = osgDB.Input()
fr.attach(fin)
readFilename = False
while not fr.eof() :
itrAdvanced = False
if fr.matchSequence("file %s") or fr.matchSequence("file %w") :
files.insert(fr[1].getStr())
fr += 2
itrAdvanced = True
readFilename = True
if not itrAdvanced :
++fr
return readFilename
return False
def open(group):
files = Files()
readMasterFile(files)
for(Files.iterator itr = files.begin()
not = files.end()
++itr)
model = osgDB.readNodeFile(*itr)
if model :
osg.notify(osg.NOTICE), "open: Loaded file ", *itr
group.addChild(model)
_existingFilenameNodeMap[*itr] = model
return True
virtual void operator () (osg.Object* callingObject)
# decided which method to call according to whole has called me.
viewer = dynamic_cast<osgViewer.Viewer*>(callingObject)
if viewer : update(viewer.getSceneData())
load = else()
def load():
#osg.notify(osg.NOTICE), "void load(Object)"
filesA = Files()
filesB = Files()
readMasterFile(filesB)
# osg.notify(osg.NOTICE), "First read ", filesA.size()
# itererate until the master file is stable
do
OpenThreads.Thread.microSleep(100000)
filesB.swap(filesA)
filesB.clear()
readMasterFile(filesB)
# osg.notify(osg.NOTICE), "second read ", filesB.size()
while filesA not =filesB :
files = Files()
files.swap(filesB)
# osg.notify(osg.NOTICE), "Now equal ", files.size()
newFiles = Files()
removedFiles = Files()
# find out which files are , and which ones have been removed.
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
for(Files.iterator fitr = files.begin()
not = files.end()
++fitr)
if _existingFilenameNodeMap.count(*fitr)==0 : newFiles.insert(*fitr)
for(FilenameNodeMap.iterator litr = _existingFilenameNodeMap.begin()
not = _existingFilenameNodeMap.end()
++litr)
if files.count(litr.first)==0 :
removedFiles.insert(litr.first)
#if 0
if not newFiles.empty() or not removedFiles.empty() :
osg.notify(osg.NOTICE), "void operator () files.size()=", files.size()
#endif
# first load the files.
nodesToAdd = FilenameNodeMap()
if not newFiles.empty() :
typedef std.vector< osg.GraphicsThread > GraphicsThreads
threads = GraphicsThreads()
for i in range(osg.GraphicsContext.getMaxContextID() + 1):
gc = osg.GraphicsContext.getCompileContext(i)
gt = gc.getGraphicsThread() if (gc) else 0
if gt : threads.push_back(gt)
if _operationQueue.valid() :
# osg.notify(osg.NOTICE), "Using OperationQueue"
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
typedef std.list< LoadAndCompileOperation > LoadAndCompileList
loadAndCompileList = LoadAndCompileList()
for fname in newFiles:
    # osg.notify(osg.NOTICE), "Adding LoadAndCompileOperation ", fname
    loadAndCompile = LoadAndCompileOperation(fname, _incrementalCompileOperation, _endOfLoadBlock)
    loadAndCompileList.push_back(loadAndCompile)
    _operationQueue.add(loadAndCompile)
#if 1
operation = _operationQueue.getNextOperation()
while operation.valid():
    # osg.notify(osg.NOTICE), "Local running of operation"
    operation(0)
    operation = _operationQueue.getNextOperation()
#endif
# osg.notify(osg.NOTICE), "Waiting for completion of LoadAndCompile operations"
_endOfLoadBlock.block()
# osg.notify(osg.NOTICE), "done ... Waiting for completion of LoadAndCompile operations"
for loadAndCompile in loadAndCompileList:
    if loadAndCompile._loadedModel.valid():
        nodesToAdd[loadAndCompile._filename] = loadAndCompile._loadedModel
else:
_endOfLoadBlock = osg.RefBlockCount(newFiles.size())
_endOfLoadBlock.reset()
for fname in newFiles:
    loadedModel = osgDB.readNodeFile(fname)
    if loadedModel:
        nodesToAdd[fname] = loadedModel
if _incrementalCompileOperation.valid() :
compileSet = osgUtil.IncrementalCompileOperation.CompileSet(loadedModel)
compileSet._compileCompletedCallback = ReleaseBlockOnCompileCompleted(_endOfLoadBlock)
_incrementalCompileOperation.add(compileSet)
else:
_endOfLoadBlock.completed()
else:
_endOfLoadBlock.completed()
_endOfLoadBlock.block()
requiresBlock = False
# pass the locally prepared data to MasterOperation's shared data
# so that the update thread can merge these changes with the main scene
# graph. This merge is carried out via the update(..) method.
if not removedFiles.empty() or not nodesToAdd.empty() :
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
_nodesToRemove.swap(removedFiles)
_nodesToAdd.swap(nodesToAdd)
requiresBlock = True
# now block so we don't try to load anything till the data has been merged
# otherwise _existingFilenameNodeMap will get out of sync.
if requiresBlock :
_updatesMergedBlock.block()
else:
OpenThreads.Thread.YieldCurrentThread()
# merge the changes with the main scene graph.
def update(scene):
# osg.notify(osg.NOTICE), "void update(Node*)"
group = dynamic_cast<osg.Group*>(scene)
if not group :
osg.notify(osg.NOTICE), "Error, MasterOperation.update(Node*) can only work with a Group as Viewer.getSceneData()."
return
lock = OpenThreads.ScopedLock<OpenThreads.Mutex>(_mutex)
if not _nodesToRemove.empty() or not _nodesToAdd.empty() :
osg.notify(osg.NOTICE), "update().................. "
if not _nodesToRemove.empty() :
for fname in _nodesToRemove:
    fnmItr = _existingFilenameNodeMap.find(fname)
    if fnmItr != _existingFilenameNodeMap.end():
        osg.notify(osg.NOTICE), " update():removing ", fname
        group.removeChild(fnmItr.second)
        _existingFilenameNodeMap.erase(fnmItr)
_nodesToRemove.clear()
if not _nodesToAdd.empty() :
for fname, node in _nodesToAdd.items():
    osg.notify(osg.NOTICE), " update():inserting ", fname
    group.addChild(node)
    _existingFilenameNodeMap[fname] = node
_nodesToAdd.clear()
_updatesMergedBlock.release()
# add release implementation so that any thread cancellation can
# work even when blocks and barriers are used.
def release():
if _operationQueue.valid() : _operationQueue.removeAllOperations()
_updatesMergedBlock.release()
if _endOfCompilebarrier.valid() : _endOfCompilebarrier.release()
if _endOfLoadBlock.valid() : _endOfLoadBlock.release()
_filename = str()
_mutex = OpenThreads.Mutex()
_existingFilenameNodeMap = FilenameNodeMap()
_nodesToRemove = Files()
_nodesToAdd = FilenameNodeMap()
_updatesMergedBlock = OpenThreads.Block()
_incrementalCompileOperation = osgUtil.IncrementalCompileOperation()
_endOfCompilebarrier = osg.BarrierOperation()
_endOfLoadBlock = osg.RefBlockCount()
_operationQueue = osg.OperationQueue()
class FilterHandler (osgGA.GUIEventHandler) :
def __init__(gt):
    _gt = gt
def handle(ea, aa):
if not _gt : return False
if ea.getEventType() == osgGA.GUIEventAdapter.KEYDOWN:
if ea.getKey() == ord("g") :
osg.notify(osg.NOTICE), "Gaussian"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.GAUSSIAN)
return True
elif ea.getKey() == ord("s") :
osg.notify(osg.NOTICE), "Smooth"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SMOOTH)
return True
elif ea.getKey() == ord("S") :
osg.notify(osg.NOTICE), "Sharpen"
_gt.setFilterMatrixAs(osgTerrain.GeometryTechnique.SHARPEN)
return True
elif ea.getKey() == ord("+") :
_gt.setFilterWidth(_gt.getFilterWidth()*1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord("-") :
_gt.setFilterWidth(_gt.getFilterWidth()/1.1)
osg.notify(osg.NOTICE), "Filter width = ", _gt.getFilterWidth()
return True
elif ea.getKey() == ord(">") :
_gt.setFilterBias(_gt.getFilterBias()+0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
elif ea.getKey() == ord("<") :
_gt.setFilterBias(_gt.getFilterBias()-0.1)
osg.notify(osg.NOTICE), "Filter bias = ", _gt.getFilterBias()
return True
return False
_gt = osg.observer_ptr<osgTerrain.GeometryTechnique>()
class LayerHandler (osgGA.GUIEventHandler) :
def __init__(layer):
    _layer = layer
def handle(ea, aa):
if not _layer : return False
scale = 1.2
if ea.getEventType() == osgGA.GUIEventAdapter.KEYDOWN:
if ea.getKey() == ord("q") :
_layer.transform(0.0, scale)
return True
elif ea.getKey() == ord("a") :
_layer.transform(0.0, 1.0/scale)
return True
return False
_layer = osg.observer_ptr<osgTerrain.Layer>()
def main(argv):
arguments = osg.ArgumentParser(argv)
# construct the viewer.
viewer = osgViewer.Viewer(arguments)
# set up the camera manipulators.
keyswitchManipulator = osgGA.KeySwitchMatrixManipulator()
keyswitchManipulator.addMatrixManipulator( ord("1"), "Trackball", osgGA.TrackballManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("2"), "Flight", osgGA.FlightManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("3"), "Drive", osgGA.DriveManipulator() )
keyswitchManipulator.addMatrixManipulator( ord("4"), "Terrain", osgGA.TerrainManipulator() )
pathfile = str()
keyForAnimationPath = | |
# source repository: lebarsfa/vpython-wx
from __future__ import division
from .cvisual import vector
from .primitives import (label, curve, faces, points, distant_light)
from .create_display import display
from . import crayola
color = crayola
from numpy import (array, arange, ndarray, zeros, sort, searchsorted,
concatenate)
from math import modf, log10
import time
# A graph package for plotting a curve, with labeled axes and autoscaling
# <NAME>, begun April 2000
# Added crosshairs March 2011; added log-log and semilog plots April 2011
gdisplays = []
def checkGraphMouse(evt, gd):
try:
gd.mouse(evt)
except:
pass
# minmax[xaxis][negaxis], minmax[yaxis][negaxis] are minimum values;
# minmax[xaxis][posaxis], minmax[yaxis][posaxis] are maximum values.
grey = (0.7,0.7,0.7) # color of axes
tmajor = 10. # length of major tick marks in pixels
tminor = 5. # length of minor tick marks in pixels
border = 10. # border around graph
frac = 0.02 # fraction of range required before remaking axes
minorticks = 5 # number of minor tick intervals between major ticks
maxmajorticks = 3 # max number of major ticks (not including 0)
maxminorticks = (maxmajorticks+1)*minorticks # max number of minor ticks (4 between major ticks)
lastgdisplay = None # the most recently created gdisplay
gdotsize = 6.0 # diameter of gdot in pixels
dz = 0.01 # offset for plots relative to axes and labels
xaxis = 0
yaxis = 1
negaxis = 0
posaxis = 1
graphfont = "sans"
fontheight = 13 # font point size
charwidth = 9 # approximate character width
znormal = [0,0,1] # for faces
logticks = []
for i in range(4):
logticks.append(log10(2*i+2)) # for displaying minor tick marks with log graphs
def loglabelnum(x): # determine what log labels to show, in what format
number = abs(int(x))
if number <= 1.01:
marks = [1]
elif number <= 2.01:
marks = [1, 2]
elif number <= 3.01:
marks = [1, 2, 3]
else:
if not (number % 3): # is divisible by 3
marks = [int(number/3), int((2*number/3)), int(number)]
elif not (number % 2): # is divisible by 2
marks = [int(number/2), int(number)]
else:
marks = [int((number+1)/2), int(number+1)]
form = '1E{0}'
return marks, form
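# Quick illustration (values derived from the rules above): for a log axis
# spanning 6 decades, loglabelnum(6) returns ([2, 4, 6], '1E{0}'), i.e. labels
# at 1E2, 1E4 and 1E6; loglabelnum(2) returns ([1, 2], '1E{0}').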
def labelnum(x, loglabel): # determine what labels to show, in what format
if loglabel:
return loglabelnum(x)
mantissa, exponent = modf(log10(x))
number = 10**mantissa
if number < 1:
number = 10*number
exponent = exponent-1
if number >= 7.49:
number = 7.5
marks = [2.5, 5.0, 7.5]
extra = 1
elif number >= 4.99:
number = 5
marks = [2.5, 5.0]
extra = 1
elif number >= 3.99:
number = 4
marks = [2.0, 4.0]
extra = 0
elif number >= 2.99:
number = 3
marks = [1.0, 2.0, 3.0]
extra = 0
elif number >= 1.99:
number = 3
marks = [1.0, 2.0]
extra = 0
elif number >= 1.49:
number = 1.5
marks = [0.5, 1.0, 1.5]
extra = 1
else:
number = 1
marks = [0.5, 1.0]
extra = 1
if exponent > 0:
digits = 0
else:
digits = int(-exponent)+extra
if digits < 3 and exponent <= 3:
form = '{0:0.'+'{0:s}'.format(str(digits))+'f}'
else:
form = '{0:0.1E}'
return (array(marks)*10**exponent).tolist(), form
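# Example (worked from the branches above): labelnum(100, False) falls through
# to the final branch (number=1, marks=[0.5, 1.0], exponent=2), so it returns
# ([50.0, 100.0], '{0:0.0f}') -- major ticks at 50 and 100, labeled as integers.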
def cleaneformat(string): # convert 2.5E-006 to 2.5E-6; 2.5E+006 to 2.5E6
index = string.find('E')
if index == -1: return string # not E format
index = index+1
if string[index] == '-':
index = index+1
elif string[index] == '+':
string = string[:index]+string[index+1:]
while index < len(string) and string[index] == '0':
string = string[:index]+string[index+1:]
if string[-1] == '-':
string = string[:-1]
if string[-1] == 'E':
string = string[:-1]
return string
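# Examples of the clean-up (traced from the code above):
#   cleaneformat('2.5E-006') -> '2.5E-6'
#   cleaneformat('1E+000')   -> '1'   (the '+', the zeros and the bare 'E' are stripped)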
class gdisplay:
def __init__(self, window=None, x=0, y=0, width=800, height=400,
title=None, xtitle=None, ytitle=None,
xmax=None, xmin=None, ymax=None, ymin=None,
logx=False, logy=False, visible=True,
foreground=None, background=None):
global lastgdisplay
lastgdisplay = self
currentdisplay = display.get_selected()
if title is None:
title = 'Graph'
self.width = width
self.height = height
if foreground is not None:
self.foreground = foreground
else:
self.foreground = color.white
if background is not None:
self.background = background
else:
self.background = color.black
self.visible = visible
self.active = False
if window:
self.display = display(window=window,title=title, x=x, y=y,
width=self.width, height=self.height,
foreground=self.foreground, background=self.background,
fov=0.01, userspin=False, uniform=False, autoscale=False,
lights=[], ambient=color.gray(0))
else:
self.display = display(title=title, x=x, y=y, visible=self.visible,
width=self.width, height=self.height,
foreground=self.foreground, background=self.background,
fov=0.01, userspin=False, uniform=False, autoscale=False,
lights=[], ambient=color.gray(0))
distant_light(direction=(0,0,1), color=color.white)
self.autoscale = [1, 1]
self.logx = logx
self.logy = logy
self.xtitle = xtitle
self.ytitle = ytitle
self.Lxtitle = label(display=self.display, visible=False, text="",
font=graphfont, height=fontheight, border=2,
xoffset=tminor, opacity=0, box=False, line=False)
self.Lytitle = label(display=self.display, visible=False, text="",
font=graphfont, height=fontheight, border=2,
xoffset=tminor, opacity=0, box=False, line=False)
if xtitle is not None: self.Lxtitle.text = xtitle
self.xtitlewidth = len(self.Lxtitle.text)*charwidth
if ytitle is not None: self.Lytitle.text = ytitle
self.ytitlewidth = len(self.Lytitle.text)*charwidth
self.mousepos = None
self.showxy = label(display=self.display, color=self.foreground,
background=self.background,
xoffset=10, yoffset=8, border=0,
opacity=0, box=False, line=False, visible=False)
gray = color.gray(0.5)
self.horline = curve(display=self.display, color=gray, visible=False)
self.vertline = curve(display=self.display, color=gray, visible=False)
# For all axis-related quantities: [x axis 0 or y axis 1][neg axis 0 or pos axis 1]
zerotextx = zerotexty = '0'
if self.logx:
zerotextx = '1'
if self.logy:
zerotexty = '1'
self.zero = [label(display=self.display, pos=(0,0,0), text=zerotextx,
color=self.foreground, visible=False,
font=graphfont, height=fontheight, border=0,
yoffset=-tmajor, linecolor=grey, box=0, opacity=0),
label(display=self.display, pos=(0,0,0), text=zerotexty,
color=self.foreground, visible=False,
font=graphfont, height=fontheight, border=2,
xoffset=-tmajor, linecolor=grey, box=0, opacity=0)]
self.axis = [[None, None], [None, None]]
self.makeaxis = [[True, True], [True, True]]
self.lastlabel = [[0., 0.], [0., 0.]]
self.format = [None, None]
self.majormarks = [[None, None], [None, None]]
self.lastminmax = [[0., 0.], [0., 0.]]
self.minmax = [[0., 0.], [0., 0.]] # [x or y][negative 0 or positive 1]
if self.logx:
if xmax is not None:
if xmax <= 0:
raise AttributeError("For a log scale, xmax must be greater than zero")
else: xmax = log10(float(xmax))
if xmin is not None:
if xmin <= 0:
raise AttributeError("For a log scale, xmin must be greater than zero")
else: xmin = log10(float(xmin))
if self.logy:
if ymax is not None:
if ymax <= 0:
raise AttributeError("For a log scale, ymax must be greater than zero")
else: ymax = log10(float(ymax))
if ymin is not None:
if ymin <= 0:
raise AttributeError("For a log scale, ymin must be greater than zero")
else: ymin = log10(float(ymin))
x0 = y0 = 0
if xmax is not None:
self.minmax[xaxis][posaxis] = xmax
self.autoscale[xaxis] = False
if xmax < 0:
marks, form = labelnum(-xmax, self.logx)
self.zero[xaxis].text = cleaneformat(form.format(xmax))
self.makeaxis[xaxis][posaxis] = False
if (xmin is None):
raise AttributeError("xmin must be specified to be less than xmax")
x0 = xmax
if (xmin is not None) and (xmin >= xmax):
raise AttributeError("xmax must be greater than xmin")
if xmin is not None:
self.minmax[xaxis][negaxis] = xmin
self.autoscale[xaxis] = False
if xmin > 0:
marks, form = labelnum(xmin, self.logx)
self.zero[xaxis].text = cleaneformat(form.format(xmin))
self.makeaxis[xaxis][negaxis] = False
if (xmax is None):
raise AttributeError("xmax must be specified to be greater than xmin")
x0 = xmin
if (xmax is not None) and (xmin >= xmax):
raise AttributeError("xmax must be greater than xmin")
if ymax is not None:
self.minmax[yaxis][posaxis] = ymax
self.autoscale[yaxis] = False
if ymax < 0:
marks, form = labelnum(-ymax, self.logy)
self.zero[yaxis].text = cleaneformat(form.format(ymax))
self.makeaxis[yaxis][posaxis] = False
if (ymin is None):
raise AttributeError("ymin must be specified to be less than ymax")
y0 = ymax
if (ymin is not None) and (ymin >= ymax):
raise AttributeError("ymax must be greater than ymin")
if ymin is not None:
self.minmax[yaxis][negaxis] = ymin
self.autoscale[yaxis] = False
if ymin > 0:
marks, form = labelnum(ymin, self.logy)
self.zero[yaxis].text = cleaneformat(form.format(ymin))
self.makeaxis[yaxis][negaxis] = False
if (ymax is None):
raise AttributeError("ymax must be specified to be greater than ymin")
y0 = ymin
if (ymax is not None) and (ymin >= ymax):
raise AttributeError("ymax must be greater than ymin")
self.zero[0].pos = (x0, y0, 0)
self.zero[1].pos = (x0, y0, 0)
self.display.range = 1e-300
self.minorticks = [ [ [], [] ], [ [],[] ] ] # all the minor ticks we'll ever use
for axis in range(2):
for axissign in range(2):
for nn in range(maxminorticks):
if axis == xaxis:
self.minorticks[axis][axissign].append(label(display=self.display, yoffset=-tminor,
font=graphfont, height=fontheight, border=0,
linecolor=grey, visible=False, box=False, opacity=0))
else:
self.minorticks[axis][axissign].append(label(display=self.display, xoffset=-tminor,
font=graphfont, height=fontheight, border=0,
linecolor=grey, visible=False, box=False, opacity=0))
self.majorticks = [ [ [], [] ], [ [],[] ] ] # all the major ticks we'll ever use
for axis in range(2):
for axissign in range(2):
for nn | |
"",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"PRON": {
"symbol": "PRON",
"address": "0xA3149E0fA0061A9007fAf307074cdCd290f0e2Fd",
"decimals": 8,
"name": "PronCoin",
"ens_address": "",
"website": "https://proncoin.io",
"logo": {
"src": "https://pbs.twimg.com/profile_images/943518409899302912/_qB2ilhB_400x400.jpg",
"width": "400",
"height": "400",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://proncoin.io"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/PronCoin-914325038734407",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/PronCoin",
"youtube": ""
}
},
"IFT": {
"symbol": "IFT",
"address": "0x7654915A1b82D6D2D0AFc37c52Af556eA8983c7E",
"decimals": 18,
"name": "InvestFeed",
"ens_address": "",
"website": "https://investfeed.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@investFeed",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://reddit.com/r/investFeedOfficial",
"slack": "",
"telegram": "https://t.me/investfeed",
"twitter": "https://twitter.com/investFeed",
"youtube": ""
}
},
"SALT": {
"symbol": "SALT",
"address": "0x4156D3342D5c385a87D264F90653733592000581",
"decimals": 8,
"name": "<NAME>",
"ens_address": "",
"website": "https://saltlending.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "saltcommunity.slack.com",
"telegram": "",
"twitter": "https://twitter.com/SaltLending",
"youtube": ""
}
},
"LINK Platform": {
"symbol": "LINK Platform",
"address": "0xE2E6D4BE086c6938B53B22144855eef674281639",
"decimals": 18,
"name": "Link Platform",
"ens_address": "",
"website": "https://ethereum.link",
"logo": {
"src": "https://etherscan.io/token/images/linkplatform28.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://www.medium.com/@ethlink",
"chat": "",
"facebook": "https://www.facebook.com/ethereumlink",
"forum": "",
"github": "https://github.com/ethlink",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://ethereum.link/invite.php",
"telegram": "",
"twitter": "https://www.twitter.com/linkplatform",
"youtube": ""
}
},
"ENC": {
"symbol": "ENC",
"address": "0x039F5050dE4908f9b5ddF40A4F3Aa3f329086387",
"decimals": 18,
"name": "Ethernet.Cash",
"ens_address": "",
"website": "https://ethernet.cash",
"logo": {
"src": "https://ethernet.cash/images/logo28x28.png",
"width": "28",
"height": "28",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@ethernetcash",
"chat": "",
"facebook": "https://fb.me/ethernetcash.official",
"forum": "",
"github": "https://github.com/ethernetcash",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/ethernetcash",
"twitter": "https://twitter.com/ethernetcash",
"youtube": ""
}
},
"GRID": {
"symbol": "GRID",
"address": "0x12B19D3e2ccc14Da04FAe33e63652ce469b3F2FD",
"decimals": 12,
"name": "Grid+",
"ens_address": "",
"website": "http://gridplus.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://blog.gridplus.io",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/gridplus",
"youtube": ""
}
},
"WPC": {
"symbol": "WPC",
"address": "0x62087245087125d3DB5B9A3D713d78E7BBc31e54",
"decimals": 18,
"name": "WorldPeaceCoin",
"ens_address": "",
"website": "http://www.worldpeacecoin.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/worldpeacecoin",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/WorldPeaceCoin1",
"youtube": ""
}
},
"SMS": {
"symbol": "SMS",
"name": "Speed Mining Service",
"type": "ERC20",
"address": "0x39013F961c378f02C2b82A6E1d31E9812786FD9D",
"ens_address": "",
"decimals": 3,
"website": "https://smscoin.jp/en",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Speed_Mining",
"youtube": ""
}
},
"IST34": {
"symbol": "IST34",
"address": "0x0cF713b11C9b986EC40D65bD4F7fbd50F6ff2d64",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://hiperteknoloji.org",
"logo": {
"src": "https://hiperteknoloji.org/ht/ist34-token-28.png",
"width": "28",
"height": "28",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://hiperteknoloji.org/2018/05/10/ist34token/",
"chat": "",
"facebook": "https://www.facebook.com/ist34token",
"forum": "https://bitcoingarden.org/forum/index.php?topic=33783",
"github": "https://github.com/IST34Token",
"gitter": "https://gitter.im/IST34-Token",
"instagram": "https://www.instagram.com/ist34_token/",
"linkedin": "https://www.linkedin.com/in/ist34-token",
"reddit": "https://www.reddit.com/user/IST34_Token",
"slack": "https://ist34token.slack.com",
"telegram": "https://t.me/IST34Token",
"twitter": "https://twitter.com/IST34_Token",
"youtube": "https://www.youtube.com/channel/UCwEbCIn8VkPMBXuyyg8Ia8w"
}
},
"ICO": {
"symbol": "ICO",
"address": "0xa33e729bf4fdeb868B534e1f20523463D9C46bEe",
"decimals": 10,
"name": "ICO",
"ens_address": "",
"website": "http://icocoin.org",
"logo": {
"src": "https://etherscan.io/token/images/icocoin_28.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "https://u.wechat.com/EM6Tgldvr3Wn9eprwIszuSo",
"facebook": "https://www.facebook.com/coin.ico.7",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/icocoin1",
"youtube": ""
}
},
"DOR": {
"symbol": "DOR",
"name": "Dorado",
"type": "ERC20",
"address": "0x906b3f8b7845840188Eab53c3f5AD348A787752f",
"ens_address": "",
"decimals": 15,
"website": "https://www.dorado.tech",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@doradoico/latest",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/DoradoICO",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Dorado_ICO",
"youtube": ""
}
},
"HBT": {
"symbol": "HBT",
"name": "Hubii Network",
"type": "ERC20",
"address": "0xDd6C68bb32462e01705011a4e2Ad1a60740f217F",
"ens_address": "",
"decimals": 15,
"website": "https://www.hubii.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@jacobotoll",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/hubiinetwork",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/hubiinetwork",
"youtube": ""
}
},
"BDG": {
"symbol": "BDG",
"address": "0x1961B3331969eD52770751fC718ef530838b6dEE",
"decimals": 18,
"name": "BitDegree Token",
"ens_address": "",
"website": "https://bitdegree.org",
"logo": {
"src": "https://raw.githubusercontent.com/bitdegree/banners/master/logos/2515x3285_letter_black.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "blog.bitdegree.org",
"chat": "",
"facebook": "www.facebook.com/bitdegree.org",
"forum": "",
"github": "https://github.com/bitdegree",
"gitter": "",
"instagram": "https://www.instagram.com/bitdegree",
"linkedin": "https://www.linkedin.com/company/bitdegree",
"reddit": "reddit.com/r/bitdegree",
"slack": "",
"telegram": "t.me/bitdegree",
"twitter": "https://twitter.com/bitdegree_org",
"youtube": "https://www.youtube.com/channel/UCuiGDksOmsM8y-_txG3wPYg"
}
},
"COB": {
"symbol": "COB",
"address": "0xb2F7EB1f2c37645bE61d73953035360e768D81E6",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://cobinhood.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@cobinhood",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/cobinhood",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "http://slack.cobinhood.com",
"telegram": "https://t.me/cobinhood",
"twitter": "https://twitter.com/cobinhood",
"youtube": ""
}
},
"CPY": {
"symbol": "CPY",
"address": "0xf44745fBd41F6A1ba151df190db0564c5fCc4410",
"decimals": 18,
"name": "COPYTRACK",
"ens_address": "",
"website": "https://copytrack.io",
"logo": {
"src": "https://cdn.copytrack.io/media/cpy.png?auto=compress&w=200",
"width": 200,
"height": 200,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://copytrack.io"
},
"social": {
"blog": "https://medium.com/aditusnetwork",
"chat": "",
"facebook": "https://www.facebook.com/COPYTRACK",
"forum": "",
"github": "https://github.com/aditus",
"gitter": "",
"instagram": "https://www.instagram.com/copytrack",
"linkedin": "https://www.linkedin.com/company/10840600",
"reddit": "",
"slack": "",
"telegram": "https://t.me/copytrackhq",
"twitter": "https://twitter.com/CopytrackHQ",
"youtube": ""
}
},
"BTL (Battle)": {
"symbol": "BTL (Battle)",
"address": "0x2accaB9cb7a48c3E82286F0b2f8798D201F4eC3f",
"decimals": 18,
"name": "BTL (Battle)",
"ens_address": "",
"website": "http://persians.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/Neurone/persians",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://battlesmartcontract.slack.com",
"telegram": "",
"twitter": "https://twitter.com/persian_token",
"youtube": ""
}
},
"RNT": {
"symbol": "RNT",
"name": "OneRoot Network",
"type": "ERC20",
"address": "0xFF603F43946A3A28DF5E6A73172555D8C8b02386",
"ens_address": "",
"decimals": 18,
"website": "https://www.oneroot.io/en",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/OneRootNetwork",
"youtube": ""
}
},
"NxC": {
"symbol": "NxC",
"address": "0x45e42D659D9f9466cD5DF622506033145a9b89Bc",
"decimals": 3,
"name": "Nexium",
"ens_address": "",
"website": "https://beyond-the-void.net",
"logo": {
"src": "https://www.beyond-the-void.net/nxc.png",
"width": 300,
"height": 300,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://www.beyond-the-void.net/wiki/posts",
"chat": "https://discordapp.com/invite/C7TqmaQ",
"facebook": "https://www.facebook.com/beyondvoid",
"forum": "https://bitcointalk.org/index.php?topic=1630816.0",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/BeyondTheVoidGame",
"slack": "http://beyond-the-void.slack.com",
"telegram": "",
"twitter": "https://twitter.com/BeyondVoidGame",
"youtube": "https://www.youtube.com/channel/UCD1IdjsnzXFdOarY20gMPQQ"
}
},
"SLT": {
"symbol": "SLT",
"address": "0x7A5fF295Dc8239d5C2374E4D894202aAF029Cab6",
"decimals": 3,
"name": "Smartlands",
"ens_address": "",
"website": "http://smartlands.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": "<EMAIL>"
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"TUSD": {
"symbol": "TUSD",
"name": "TrueUSD",
"type": "ERC20",
"address": "0x0000000000085d4780B73119b644AE5ecd22b376",
"ens_address": "",
"decimals": 18,
"website": "https://www.trusttoken.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://blog.trusttoken.com",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/trusttoken",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/TrustToken/",
"slack": "",
| |
# Copyright (c) 2013. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import six
import platform
import time
import logging
import os
from six.moves import http_client
from six.moves import map
from six import string_types
import urllib
import base64
import json
import email.message
from librato import exceptions
from librato.queue import Queue
from librato.metrics import Gauge, Counter, Metric
from librato.alerts import Alert, Service
from librato.annotations import Annotation
from librato.spaces import Space, Chart
__version__ = "3.1.0"
# Defaults
HOSTNAME = "metrics-api.librato.com"
BASE_PATH = "/v1/"
DEFAULT_TIMEOUT = 10
log = logging.getLogger("librato")
# Alias HTTPSConnection so the tests can mock it out.
HTTPSConnection = http_client.HTTPSConnection
HTTPConnection = http_client.HTTPConnection
# Alias urlencode, it moved between py2 and py3.
try:
urlencode = urllib.parse.urlencode # py3
except AttributeError:
urlencode = urllib.urlencode # py2
def sanitize_metric_name(metric_name):
disallowed_character_pattern = r"(([^A-Za-z0-9.:\-_]|[\[\]]|\s)+)"
max_metric_name_length = 255
return re.sub(disallowed_character_pattern, '-', metric_name)[:max_metric_name_length]
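# Illustrative behavior (not part of the original file): every run of
# disallowed characters collapses to a single '-', and the result is capped
# at 255 characters, e.g.
#   sanitize_metric_name("app metrics/load [prod]")  ->  "app-metrics-load-prod-"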
def sanitize_no_op(metric_name):
"""
Default behavior, some people want the error
"""
return metric_name
class LibratoConnection(object):
"""Librato API Connection.
Usage:
>>> conn = LibratoConnection(username, api_key)
>>> conn.list_metrics()
[...]
"""
def __init__(self, username, api_key, hostname=HOSTNAME, base_path=BASE_PATH, sanitizer=sanitize_no_op,
protocol="https", tags={}):
"""Create a new connection to Librato Metrics.
Doesn't actually connect yet or validate until you make a request.
:param username: The username (email address) of the user to connect as
:type username: str
:param api_key: The API Key (token) to use to authenticate
:type api_key: str
"""
try:
self.username = username.encode('ascii')
self.api_key = api_key.encode('ascii')
except:
raise TypeError("Librato only supports ascii for the credentials")
if protocol not in ["http", "https"]:
raise ValueError("Unsupported protocol: {}".format(protocol))
self.custom_ua = None
self.protocol = protocol
self.hostname = hostname
self.base_path = base_path
# these two attributes are used to control fake server errors when doing
# unit testing.
self.fake_n_errors = 0
self.backoff_logic = lambda backoff: backoff * 2
self.sanitize = sanitizer
self.timeout = DEFAULT_TIMEOUT
self.tags = dict(tags)
def _compute_ua(self):
if self.custom_ua:
return self.custom_ua
else:
# http://en.wikipedia.org/wiki/User_agent#Format
# librato-metrics/1.0.3 (ruby; 1.9.3p385; x86_64-darwin11.4.2) direct-faraday/0.8.4
ua_chunks = [] # Set user agent
ua_chunks.append("python-librato/" + __version__)
p = platform
system_info = (p.python_version(), p.machine(), p.system(), p.release())
ua_chunks.append("(python; %s; %s-%s%s)" % system_info)
return ' '.join(ua_chunks)
def __getattr__(self, attr):
def handle_undefined_method(*args):
if re.search('dashboard|instrument', attr):
print("We have deprecated support for instruments and dashboards.")
print("https://github.com/librato/python-librato")
print("")
raise NotImplementedError()
return handle_undefined_method
def _set_headers(self, headers):
""" set headers for request """
if headers is None:
headers = {}
headers['Authorization'] = b"Basic " + base64.b64encode(self.username + b":" + self.api_key).strip()
headers['User-Agent'] = self._compute_ua()
return headers
def _url_encode_params(self, params={}):
if not isinstance(params, dict):
raise Exception("You must pass in a dictionary!")
params_list = []
for k, v in params.items():
if isinstance(v, list):
params_list.extend([(k + '[]', x) for x in v])
else:
params_list.append((k, v))
return urlencode(params_list)
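# Example encoding (derived from the code above): list values are expanded
# into repeated "key[]" parameters, e.g.
#   {'resolution': 1, 'sources': ['a', 'b']}
#   -> 'resolution=1&sources%5B%5D=a&sources%5B%5D=b'   # '[]' gets percent-encoded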
def _make_request(self, conn, path, headers, query_props, method):
""" Perform the an https request to the server """
uri = self.base_path + path
body = None
if query_props:
if method == "POST" or method == "DELETE" or method == "PUT":
body = json.dumps(query_props)
headers['Content-Type'] = "application/json"
else:
uri += "?" + self._url_encode_params(query_props)
log.info("method=%s uri=%s" % (method, uri))
log.info("body(->): %s" % body)
conn.request(method, uri, body=body, headers=headers)
return conn.getresponse()
def _process_response(self, resp, backoff):
""" Process the response from the server """
success = True
resp_data = None
not_a_server_error = resp.status < 500
if not_a_server_error:
resp_data = _decode_body(resp)
a_client_error = resp.status >= 400
if a_client_error:
raise exceptions.get(resp.status, resp_data)
return resp_data, success, backoff
else: # A server error, wait and retry
backoff = self.backoff_logic(backoff)
log.info("%s: waiting %s before re-trying" % (resp.status, backoff))
time.sleep(backoff)
return None, not success, backoff
def _parse_tags_params(self, tags):
result = {}
for k, v in tags.items():
result["tags[%s]" % k] = v
return result
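# Example (derived from the code above): tag filters become "tags[...]" query
# parameters, e.g. {'region': 'us-east', 'az': '1a'} ->
#   {'tags[region]': 'us-east', 'tags[az]': '1a'}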
def _mexe(self, path, method="GET", query_props=None, p_headers=None):
"""Internal method for executing a command.
If we get server errors, we wait exponentially longer before retrying.
"""
conn = self._setup_connection()
headers = self._set_headers(p_headers)
success = False
backoff = 1
resp_data = None
while not success:
resp = self._make_request(conn, path, headers, query_props, method)
try:
resp_data, success, backoff = self._process_response(resp, backoff)
except http_client.ResponseNotReady:
conn.close()
conn = self._setup_connection()
conn.close()
return resp_data
def _do_we_want_to_fake_server_errors(self):
return self.fake_n_errors > 0
def _setup_connection(self):
connection_class = HTTPSConnection if self.protocol == "https" else HTTPConnection
if self._do_we_want_to_fake_server_errors():
return connection_class(self.hostname, fake_n_errors=self.fake_n_errors)
else:
return connection_class(self.hostname, timeout=self.timeout)
def _parse(self, resp, name, cls):
"""Parse to an object"""
if name in resp:
return [cls.from_dict(self, m) for m in resp[name]]
else:
return resp
# Get a shallow copy of the top-level tag set
def get_tags(self):
return dict(self.tags)
# Define the top-level tag set for posting measurements
def set_tags(self, d):
self.tags = dict(d) # Create a copy
# Add to the top-level tag set
def add_tags(self, d):
self.tags.update(d)
# Return all items for a "list" request
def _get_paginated_results(self, entity, klass, **query_props):
resp = self._mexe(entity, query_props=query_props)
results = self._parse(resp, entity, klass)
for result in results:
yield result
length = resp.get('query', {}).get('length', 0)
offset = query_props.get('offset', 0) + length
total = resp.get('query', {}).get('total', length)
if offset < total and length > 0:
query_props.update({'offset': offset})
for result in self._get_paginated_results(entity, klass, **query_props):
yield result
#
# Metrics
#
def list_metrics(self, **query_props):
"""List a page of metrics"""
resp = self._mexe("metrics", query_props=query_props)
return self._parse(resp, "metrics", Metric)
def list_all_metrics(self, **query_props):
return self._get_paginated_results("metrics", Metric, **query_props)
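# Usage sketch (hypothetical connection and variable names): the generator
# follows the API's offset pagination transparently, so callers just iterate.
#   api = LibratoConnection(user, token)
#   for metric in api.list_all_metrics():
#       print(metric.name)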
def submit(self, name, value, type="gauge", **query_props):
if 'tags' in query_props or self.get_tags():
self.submit_tagged(name, value, **query_props)
else:
payload = {'gauges': [], 'counters': []}
metric = {'name': self.sanitize(name), 'value': value}
for k, v in query_props.items():
metric[k] = v
payload[type + 's'].append(metric)
self._mexe("metrics", method="POST", query_props=payload)
def submit_tagged(self, name, value, **query_props):
payload = {'measurements': []}
payload['measurements'].append(self.create_tagged_payload(name, value, **query_props))
self._mexe("measurements", method="POST", query_props=payload)
def create_tagged_payload(self, name, value, **query_props):
"""Create the measurement for forwarding to Librato"""
measurement = {
'name': self.sanitize(name),
'value': value
}
if 'tags' in query_props:
inherit_tags = query_props.pop('inherit_tags', False)
if inherit_tags:
tags = query_props.pop('tags', {})
measurement['tags'] = dict(self.get_tags(), **tags)
elif self.tags:
measurement['tags'] = self.tags
for k, v in query_props.items():
measurement[k] = v
return measurement
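# Example (derived from the code above), assuming the connection-level tags
# are {'host': 'web-1'}:
#   create_tagged_payload('cpu', 90, tags={'region': 'us'})
#     -> {'name': 'cpu', 'value': 90, 'tags': {'region': 'us'}}
#   create_tagged_payload('cpu', 90, tags={'region': 'us'}, inherit_tags=True)
#     -> {'name': 'cpu', 'value': 90, 'tags': {'host': 'web-1', 'region': 'us'}}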
def get(self, name, **query_props):
resp = self._mexe("metrics/%s" % self.sanitize(name), method="GET", query_props=query_props)
if resp['type'] == 'gauge':
return Gauge.from_dict(self, resp)
elif resp['type'] == 'counter':
return Counter.from_dict(self, resp)
else:
raise Exception('The server sent me something that is not a Gauge nor a Counter.')
def get_tagged(self, name, **query_props):
"""Fetches multi-dimensional metrics"""
if 'resolution' not in query_props:
# Default to raw resolution
query_props['resolution'] = 1
if 'start_time' not in query_props and 'duration' not in query_props:
raise Exception("You must provide 'start_time' or 'duration'")
if 'start_time' in query_props and 'end_time' in query_props and 'duration' in query_props:
raise Exception("It is an error to set 'start_time', 'end_time' and 'duration'")
if 'tags' in query_props:
parsed_tags = self._parse_tags_params(query_props.pop('tags'))
query_props.update(parsed_tags)
return self._mexe("measurements/%s" % self.sanitize(name), method="GET", query_props=query_props)
def get_measurements(self, name, **query_props):
return self.get_tagged(name, **query_props)
def get_composite(self, compose, **query_props):
if self.get_tags():
return self.get_composite_tagged(compose, **query_props)
else:
if 'resolution' not in query_props:
# Default to raw resolution
query_props['resolution'] = 1
if 'start_time' not in query_props:
raise Exception("You must provide a 'start_time'")
query_props['compose'] = compose
return self._mexe('metrics', method="GET", query_props=query_props)
def get_composite_tagged(self, compose, **query_props):
if 'resolution' not in query_props:
# Default to raw resolution
query_props['resolution'] = 1
if 'start_time' not in query_props:
raise Exception("You must provide | |
# xformer/transformer.py
import tensorflow as tf
class BaseModel:
def on_train_start(self): pass
def on_epoch_end(self): pass
def on_fit_end(self): pass
def on_epoch_start(self): pass
def fit(self, dl, n_epochs=1, callbacks=None):
self.dl = dl
self.n_epochs = n_epochs
self.on_train_start()
if callbacks is not None:
for cb in callbacks:
cb.on_train_start(self)
for epoch in range(n_epochs):
self.on_epoch_start()
if callbacks is not None:
for cb in callbacks:
cb.on_epoch_start(self)
self.epoch = epoch
self.n_batches = len(dl)
print(f'Epoch {epoch+1}/{n_epochs}')
pbar = tf.keras.utils.Progbar(target=self.n_batches)
for idx, batch in enumerate(dl):
self.batch_idx = idx
loss_dict = self.train_step(epoch, idx, batch)
pbar.update(idx, values=list(loss_dict.items()))
pbar.update(self.n_batches, values=None)
self.on_epoch_end()
if callbacks is not None:
for cb in callbacks:
cb.on_epoch_end(self)
self.on_fit_end()
if callbacks is not None:
for cb in callbacks:
cb.on_fit_end(self)
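# A minimal sketch (hypothetical names, not from the original file) of how
# BaseModel is meant to be subclassed: implement train_step and return a dict
# of scalar losses for the Keras progress bar used in fit().
#
#   import torch
#
#   class Regressor(BaseModel):
#       def __init__(self, model, lr=1e-3):
#           self.model = model
#           self.opt = torch.optim.Adam(model.parameters(), lr=lr)
#       def train_step(self, epoch, idx, batch):
#           x, y = batch
#           self.opt.zero_grad()
#           loss = torch.nn.functional.mse_loss(self.model(x), y)
#           loss.backward()
#           self.opt.step()
#           return {'loss': float(loss.detach())}
#
#   # Regressor(net).fit(train_loader, n_epochs=3)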
import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(
self,
input_dim,
hid_dim,
n_layers,
n_heads,
pf_dim,
dropout,
device,
max_length = 100,
token_embedding=True,
):
super().__init__()
self.token_embedding = token_embedding
self.device = device
if token_embedding:
self.tok_embedding = nn.Embedding(input_dim, hid_dim)
else:
self.tok_embedding = nn.Linear(input_dim, hid_dim)
self.pos_embedding = nn.Embedding(max_length, hid_dim)
self.layers = nn.ModuleList([EncoderLayer(hid_dim,
n_heads,
pf_dim,
dropout,
device)
for _ in range(n_layers)])
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
def forward(self, src, src_mask):
#src = One of: [batch size, src len], [batch size, src len, input dim]
#src_mask = [batch size, 1, 1, src len]
batch_size = src.shape[0]
src_len = src.shape[1]
pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
#pos = [batch size, src len]
src_emb = self.tok_embedding(src)
src = self.dropout((src_emb * self.scale) + self.pos_embedding(pos))
#src = [batch size, src len, hid dim]
for layer in self.layers:
src = layer(src, src_mask)
#src = [batch size, src len, hid dim]
return src
class EncoderLayer(nn.Module):
def __init__(self,
hid_dim,
n_heads,
pf_dim,
dropout,
device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
self.ff_layer_norm = nn.LayerNorm(hid_dim)
self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
pf_dim,
dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_mask):
#src = [batch size, src len, hid dim]
#src_mask = [batch size, 1, 1, src len]
#self attention
_src, _ = self.self_attention(src, src, src, src_mask)
#dropout, residual connection and layer norm
src = self.self_attn_layer_norm(src + self.dropout(_src))
#src = [batch size, src len, hid dim]
#positionwise feedforward
_src = self.positionwise_feedforward(src)
#dropout, residual and layer norm
src = self.ff_layer_norm(src + self.dropout(_src))
#src = [batch size, src len, hid dim]
return src
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, hid_dim, n_heads, dropout, device):
super().__init__()
assert hid_dim % n_heads == 0
self.hid_dim = hid_dim
self.n_heads = n_heads
self.head_dim = hid_dim // n_heads
self.fc_q = nn.Linear(hid_dim, hid_dim)
self.fc_k = nn.Linear(hid_dim, hid_dim)
self.fc_v = nn.Linear(hid_dim, hid_dim)
self.fc_o = nn.Linear(hid_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)
def forward(self, query, key, value, mask = None):
batch_size = query.shape[0]
#query = [batch size, query len, hid dim]
#key = [batch size, key len, hid dim]
#value = [batch size, value len, hid dim]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
#Q = [batch size, query len, hid dim]
#K = [batch size, key len, hid dim]
#V = [batch size, value len, hid dim]
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)
#Q = [batch size, n heads, query len, head dim]
#K = [batch size, n heads, key len, head dim]
#V = [batch size, n heads, value len, head dim]
energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
#energy = [batch size, n heads, query len, key len]
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e10)
attention = torch.softmax(energy, dim = -1)
#attention = [batch size, n heads, query len, key len]
x = torch.matmul(self.dropout(attention), V)
#x = [batch size, n heads, query len, head dim]
x = x.permute(0, 2, 1, 3).contiguous()
#x = [batch size, query len, n heads, head dim]
x = x.view(batch_size, -1, self.hid_dim)
#x = [batch size, query len, hid dim]
x = self.fc_o(x)
#x = [batch size, query len, hid dim]
return x, attention
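# Shape sanity check (illustrative only): with hid_dim=8 and n_heads=2, a batch
# of 3 sequences of length 5 self-attending to itself gives
#   mha = MultiHeadAttentionLayer(8, 2, 0.1, 'cpu')
#   x = torch.rand(3, 5, 8)
#   out, attn = mha(x, x, x)    # out: [3, 5, 8], attn: [3, 2, 5, 5]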
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, hid_dim, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(hid_dim, pf_dim)
self.fc_2 = nn.Linear(pf_dim, hid_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
#x = [batch size, seq len, hid dim]
x = self.dropout(torch.relu(self.fc_1(x)))
#x = [batch size, seq len, pf dim]
x = self.fc_2(x)
#x = [batch size, seq len, hid dim]
return x
class Decoder(nn.Module):
def __init__(self,
output_dim,
hid_dim,
n_layers,
n_heads,
pf_dim,
dropout,
device,
max_length = 100):
super().__init__()
self.device = device
self.tok_embedding = nn.Embedding(output_dim, hid_dim)
self.pos_embedding = nn.Embedding(max_length, hid_dim)
self.layers = nn.ModuleList([DecoderLayer(hid_dim,
n_heads,
pf_dim,
dropout,
device)
for _ in range(n_layers)])
self.fc_out = nn.Linear(hid_dim, output_dim)
self.dropout = nn.Dropout(dropout)
self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)
def forward(self, trg, enc_src, trg_mask, src_mask):
#trg = [batch size, trg len]
#enc_src = [batch size, src len, hid dim]
#trg_mask = [batch size, 1, trg len, trg len]
#src_mask = [batch size, 1, 1, src len]
batch_size = trg.shape[0]
trg_len = trg.shape[1]
pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)
#pos = [batch size, trg len]
trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos))
#trg = [batch size, trg len, hid dim]
for layer in self.layers:
trg, attention = layer(trg, enc_src, trg_mask, src_mask)
#trg = [batch size, trg len, hid dim]
#attention = [batch size, n heads, trg len, src len]
output = self.fc_out(trg)
#output = [batch size, trg len, output dim]
return output, attention
class DecoderLayer(nn.Module):
def __init__(self,
hid_dim,
n_heads,
pf_dim,
dropout,
device):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
self.enc_attn_layer_norm = nn.LayerNorm(hid_dim)
self.ff_layer_norm = nn.LayerNorm(hid_dim)
self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.encoder_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim,
pf_dim,
dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, trg, enc_src, trg_mask, src_mask):
#trg = [batch size, trg len, hid dim]
#enc_src = [batch size, src len, hid dim]
#trg_mask = [batch size, 1, trg len, trg len]
#src_mask = [batch size, 1, 1, src len]
#self attention
_trg, _ = self.self_attention(trg, trg, trg, trg_mask)
#dropout, residual connection and layer norm
trg = self.self_attn_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#encoder attention
_trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
#dropout, residual connection and layer norm
trg = self.enc_attn_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#positionwise feedforward
_trg = self.positionwise_feedforward(trg)
#dropout, residual and layer norm
trg = self.ff_layer_norm(trg + self.dropout(_trg))
#trg = [batch size, trg len, hid dim]
#attention = [batch size, n heads, trg len, src len]
return trg, attention
class Seq2Seq(nn.Module):
def __init__(self,
encoder,
decoder,
src_pad_idx,
trg_pad_idx,
device):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.src_pad_idx = src_pad_idx
self.trg_pad_idx = trg_pad_idx
self.device = device
def make_src_mask(self, src):
#src = One of : [batch size, src len], [batch size, src len, input dim]
if len(src.shape) == 2:
src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
elif len(src.shape) == 3:
src_mask = (src[:,:,0] != self.src_pad_idx).unsqueeze(1).unsqueeze(2)
else:
raise ValueError(f'src has {len(src.shape)} dims')
return src_mask #src_mask = [batch size, 1, 1, src len]
def make_trg_mask(self, trg):
#trg = [batch size, trg len]
trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(2)
#trg_pad_mask = [batch size, 1, 1, trg len]
trg_len = trg.shape[1]
trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device = self.device)).bool()
#trg_sub_mask = [trg len, trg len]
trg_mask = trg_pad_mask & trg_sub_mask
#trg_mask = [batch size, 1, trg len, trg len]
return trg_mask
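# NOTE: the source file appears to be cut off below, before Seq2Seq.forward.
# A minimal sketch consistent with the Encoder/Decoder defined above (an
# assumption, not necessarily the author's original code):
def forward(self, src, trg):
    #src = [batch size, src len] (or [batch size, src len, input dim])
    #trg = [batch size, trg len]
    src_mask = self.make_src_mask(src)
    trg_mask = self.make_trg_mask(trg)
    enc_src = self.encoder(src, src_mask)
    output, attention = self.decoder(trg, enc_src, trg_mask, src_mask)
    #output = [batch size, trg len, output dim]
    return output, attention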
| |
# source repository: Crunch-io/crunch-cube
# encoding: utf-8
"""Provides the Dimension class."""
import copy
from collections.abc import Sequence
from typing import Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from cr.cube.enums import (
COLLATION_METHOD as CM,
DIMENSION_TYPE as DT,
MARGINAL,
MEASURE,
_DimensionType,
)
from cr.cube.util import lazyproperty
class _BaseDimensions(Sequence):
"""Base class for dimension collections."""
def __getitem__(self, idx_or_slice):
"""Implements indexed access."""
return self._dimensions[idx_or_slice]
def __iter__(self):
"""Implements (efficient) iterability."""
return iter(self._dimensions)
def __len__(self):
"""Implements len(elements)."""
return len(self._dimensions)
@lazyproperty
def _dimensions(self) -> Tuple["Dimension", ...]:
"""tuple of dimension objects in this collection.
This composed tuple is the source for the dimension objects in this
collection.
"""
raise NotImplementedError(
"must be implemented by each subclass"
) # pragma: no cover
class AllDimensions(_BaseDimensions):
"""Collection containing every dimension defined in cube response."""
def __init__(self, dimension_dicts):
self._dimension_dicts = dimension_dicts
@lazyproperty
def apparent_dimensions(self) -> "_ApparentDimensions":
"""_ApparentDimensions collection of the "visible" dimensions.
The two dimensions for a multiple-response (MR) variable are
conflated into a single dimension in this collection.
"""
return _ApparentDimensions(all_dimensions=self._dimensions)
@lazyproperty
def dimension_order(self) -> Tuple[int, ...]:
"""Tuple of int representing the dimension order.
The dimension order depends on the presence of a numeric array among the dimensions
and on the number of cube dimensions. In the case of 3 dimensions, e.g.
NUM_ARR_X_MR_SUBVAR_X_MR_CAT the order should be (1,2,0) that is basically
swapping the MR (2 dimensions) with the NUM_ARRAY dimension. In case of 2
dimensions, the dimension order corresponds simply to the reverse of the original
dimension order.
"""
# NOTE: this is a temporary hack that goes away when we introduce the dim_order
# concept. We should receive the actual order directly in the cube_response.
# So, all this logic will be deleted.
dimension_types = tuple(d.dimension_type for d in self._dimensions)
dim_order = tuple(range(len(self._dimensions)))
if len(self._dimensions) >= 2 and DT.NUM_ARRAY in dimension_types:
return (
dim_order[-2:] + (dim_order[0],)
if len(self._dimensions) == 3
else dim_order[::-1]
)
return dim_order
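# Illustrative outcomes of the rule above: dimensions (NUM_ARRAY, MR_SUBVAR,
# MR_CAT) yield (1, 2, 0); (NUM_ARRAY, CAT) yields (1, 0); any cube without a
# numeric-array dimension keeps the original order (0, 1, ...).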
@lazyproperty
def shape(self) -> Tuple[int, ...]:
"""Tuple of int element count for each dimension.
This corresponds to the shape of the ndarray representing the raw
cube response values (raw meaning including missing and prunable
elements and any MR_CAT dimensions).
"""
dimensions = [self._dimensions[i] for i in self.dimension_order]
return tuple(d.shape for d in dimensions)
@lazyproperty
def _dimensions(self) -> Tuple["Dimension", ...]:
"""tuple of dimension objects in this collection.
This composed tuple is the internal source for the dimension objects
in this collection.
"""
return tuple(_DimensionFactory.iter_dimensions(self._dimension_dicts))
class _ApparentDimensions(_BaseDimensions):
"""Collection containing only "user" dimensions of a cube."""
def __init__(self, all_dimensions):
self._all_dimensions = all_dimensions
@lazyproperty
def _dimensions(self) -> Tuple["Dimension", ...]:
"""tuple of dimension objects in this collection.
This composed tuple is the source for the dimension objects in this
collection.
"""
return tuple(d for d in self._all_dimensions if d.dimension_type != DT.MR_CAT)
class _DimensionFactory:
"""Produce Dimension objects of correct type from dimension-dicts.
"type" here is primarily the `.dimension_type` value of the dimension,
although if `Dimension` becomes an object hierarchy, this factory would
make dimension class choices as well.
"""
def __init__(self, dimension_dicts):
self._dimension_dicts = dimension_dicts
@classmethod
def iter_dimensions(cls, dimension_dicts) -> Iterator["Dimension"]:
"""Generate Dimension object for each of *dimension_dicts*."""
return cls(dimension_dicts)._iter_dimensions()
def _iter_dimensions(self) -> Iterator["Dimension"]:
"""Generate Dimension object for each dimension dict."""
return (
Dimension(raw_dimension.dimension_dict, raw_dimension.dimension_type)
for raw_dimension in self._raw_dimensions
)
@lazyproperty
def _raw_dimensions(self) -> Tuple["_RawDimension", ...]:
"""Sequence of _RawDimension objects wrapping each dimension dict."""
return tuple(
_RawDimension(dimension_dict, self._dimension_dicts)
for dimension_dict in self._dimension_dicts
)
class _RawDimension:
"""Thin wrapper around dimension-dict to support dimension-type discovery.
Determining dimension-type is pretty complex and requires repeated
partial parsing of both the dimension dict and its siblings. This class
abstracts that access for clarity.
"""
def __init__(self, dimension_dict, dimension_dicts):
self._dimension_dict = dimension_dict
self._dimension_dicts = dimension_dicts
@lazyproperty
def dimension_dict(self) -> Dict:
"""dict defining this dimension in cube response."""
return self._dimension_dict
@lazyproperty
def dimension_type(self) -> _DimensionType:
"""_DimensionType (member of DIMENSION_TYPE) appropriate to dimension_dict."""
base_type = self._base_type
if base_type == "categorical":
return self._resolve_categorical()
if base_type == "enum.variable":
return self._resolve_array_type()
if base_type == "enum.datetime":
return DT.DATETIME
if base_type == "enum.numeric":
return DT.BINNED_NUMERIC
if base_type == "enum.text":
return DT.TEXT
if base_type == "enum.num_arr":
return DT.NUM_ARRAY
raise NotImplementedError(f"unrecognized dimension type {base_type}")
@lazyproperty
def _alias(self) -> str:
"""Return str key for variable behind *dimension_dict*."""
return self._dimension_dict["references"]["alias"]
@lazyproperty
def _base_type(self) -> str:
"""Return str like 'enum.numeric' representing dimension type.
This string is a 'type.subclass' concatenation of the str keys
used to identify the dimension type in the cube response JSON.
The '.subclass' suffix only appears where a subtype is present.
"""
type_class = self._dimension_dict["type"]["class"]
if type_class == "categorical":
return "categorical"
if type_class == "enum":
subclass = self._dimension_dict["type"]["subtype"]["class"]
return f"enum.{subclass}"
raise NotImplementedError(f"unexpected dimension type class '{type_class}'")
@lazyproperty
def _categories(self) -> List[Dict]:
return self._dimension_dict["type"].get("categories", [])
@lazyproperty
def _has_selected_category(self) -> bool:
"""True if dimension-dict includes one or more selected categories.
A "selected" category-dict is one having `'selected': True`. This
property is only meaningful for a categorical dimension dict.
"""
return any(category.get("selected") for category in self._categories)
@lazyproperty
def _is_logical_type(self) -> bool:
"""True if dimension-dict has the categories equal to those of the logical type.
Logical type has exactly three categories with IDs [1, 0, -1] (in that order). This type is
used to define the selections dimension of the multiple response type, when it
follows the subvariables dimension.
"""
return [category.get("id") for category in self._categories] == [1, 0, -1]
@lazyproperty
def _is_array_cat(self) -> bool:
"""True if a categorical dimension_dict belongs to an array pair.
Returns True for a CA_CAT or MR_CAT dimension. Only meaningful when
the dimension is known to be categorical (has base-type
'categorical').
"""
return "subreferences" in self._dimension_dict["references"]
@lazyproperty
def _is_cat_date(self) -> bool:
"""True if dimension is a categorical date, False otherwise.
A dimension is a categorical date if it has all the properties of a "normal"
categorical dimension but also has a `"date"` field in any of its categories.
"""
if self._dimension_dict["type"]["class"] != "categorical":
return False
return any("date" in cat for cat in self._categories)
@lazyproperty
def _next_raw_dimension(self) -> Optional["_RawDimension"]:
"""_RawDimension for next *dimension_dict* in sequence or None for last.
Returns None if this dimension is the last in sequence for this cube.
"""
dimension_dicts = self._dimension_dicts
this_idx = dimension_dicts.index(self._dimension_dict)
if this_idx > len(dimension_dicts) - 2:
return None
return _RawDimension(dimension_dicts[this_idx + 1], self._dimension_dicts)
def _resolve_array_type(self) -> _DimensionType:
"""Return one of the ARRAY_TYPES members of DIMENSION_TYPE.
This method distinguishes between CA and MR dimensions. The return
value is only meaningful if the dimension is known to be of array
type (i.e. either CA or MR, base-type 'enum.variable').
"""
next_raw_dimension = self._next_raw_dimension
if next_raw_dimension is None:
return DT.CA
is_mr_subvar = (
next_raw_dimension._base_type == "categorical"
and next_raw_dimension._has_selected_category
and next_raw_dimension._alias == self._alias
and next_raw_dimension._is_logical_type
)
return DT.MR if is_mr_subvar else DT.CA
def _resolve_categorical(self) -> _DimensionType:
"""Return one of the categorical members of DIMENSION_TYPE.
This method distinguishes between CAT, CA_CAT, MR_CAT, and LOGICAL
dimension types, all of which have the base type 'categorical'. The
return value is only meaningful if the dimension is known to be one
of the categorical types (has base-type 'categorical').
"""
# ---an array categorical is either CA_CAT or MR_CAT---
if self._is_array_cat:
return (
DT.MR_CAT
if self._has_selected_category and self._is_logical_type
else DT.CA_CAT
)
# ---what's left is three different versions of categorical dimension---
# ---first the logical---
if self._has_selected_category and self._is_logical_type:
return DT.LOGICAL
# ---or a categorical date---
if self._is_cat_date:
return DT.CAT_DATE
# ---or the plain-old categorical---
return DT.CAT
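# Hedged illustration (not part of the original module): a minimal pair of
# dimension dicts that _DimensionFactory would resolve to (MR, MR_CAT), i.e. a
# subvariables dimension followed by its selections dimension. The key/value
# shapes below are assumptions distilled from the checks in _RawDimension
# above; real cube-response payloads carry many more fields.
def _example_mr_dimension_pair():
    subvariables_dim = {
        "references": {"alias": "pets"},
        "type": {"class": "enum", "subtype": {"class": "variable"}},
    }
    selections_dim = {
        "references": {"alias": "pets", "subreferences": []},
        "type": {
            "class": "categorical",
            "categories": [
                {"id": 1, "selected": True},
                {"id": 0},
                {"id": -1},
            ],
        },
    }
    # The first dimension resolves to DT.MR, the second to DT.MR_CAT.
    return list(
        _DimensionFactory.iter_dimensions([subvariables_dim, selections_dim])
    )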
class Dimension:
"""Represents one dimension of a cube response.
Each dimension represents one of the variables in a cube response. For
example, a query to cross-tabulate snack-food preference against region
will have two variables (snack-food preference and region) and will produce
a two-dimensional (2D) cube response. That cube will have two of these
dimension objects, which are accessed using
:attr:`.CrunchCube.dimensions`.
"""
def __init__(self, dimension_dict, dimension_type, dimension_transforms=None):
self._unshimmed_dimension_dict = dimension_dict
self._dimension_type = dimension_type
self._unshimmed_dimension_transforms_dict = dimension_transforms or {}
@lazyproperty
def alias(self) -> Optional[str]:
"""Return the alias for the dimension if it exists, None otherwise."""
return self._dimension_dict["references"].get("alias")
@lazyproperty
def all_elements(self) -> "_AllElements":
"""_AllElements object providing cats or subvars of this dimension.
Elements in this sequence appear in cube-result order.
"""
return _AllElements(
self._dimension_dict["type"],
| |
from django.shortcuts import render,redirect
from django.http import JsonResponse
from .models import *
from django.views import View
from django.db.models import Q
from django.forms import model_to_dict
from django.contrib.auth import get_user_model
# Create your views here.
from .forms import *
from ajax_datatable.views import AjaxDatatableView
def home_page(request):
return render(request, 'home.html')
#region ########### Indent ###########
class indent_table(AjaxDatatableView):
model = indent
title = 'Indent'
length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
initial_order = [["recived","asc"]]
search_values_separator = " "
column_defs = [
AjaxDatatableView.render_row_tools_column_def(),
{
'name': 'pk',
'visible': True,
'searchable': False,
'orderable': True,
'title': 'Indt.',
}, # pk
{
'name': 'material_shape',
'visible': True,
'searchable': True,
'orderable': True,
'title': 'Shape',
}, # material_shape
{
'name': 'Description',
'foreign_field': 'item_description__description',
'visible': True,
'searchable': True,
'placeholder':'description',
'title':'Item Description',
}, # Item Description
{
'name': 'description',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Description',
}, # description
{
'name': 'weight',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Weight',
}, # weight
{
'name': 'size',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Size',
}, # size
{
'name': 'thickness',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'THK',
}, # thickness
{
'name': 'quantity',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Qty',
}, # quantity
{
'name': 'value',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Value',
'className': 'currency',
}, # value
{
'name': 'net_value',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Total Value',
'className': 'currency',
}, # net_value
{
'name': 'recived',
'visible': True,
'orderable': True,
'searchable': False,
'title': 'Received',
'className':"is_completed",
}, # recived
{'name': 'Add GRN', 'visible': True,'searchable': False, 'orderable': False},
{'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
{
'name':'Delete',
'visible': True,
'searchable': False,
'orderable': False,
"title":"DEL"
}, # delete field
]
def get_initial_queryset(self, request=None):
wo_id=request.REQUEST.get('wo_id')
queryset = self.model.objects.all()
queryset = queryset.filter(WO__id=wo_id)
# queryset = self.model.objects.all()
return queryset
def customize_row(self, row, obj):
# 'row' is a dictionary representing the current row, and 'obj' is the current object.
get_str = lambda x: x if x else "--"
row['net_value'] = f''' {obj.net_value()}'''
row['size'] = get_str(obj.size)
row['thickness'] = get_str(obj.thickness)
row['weight'] = f''' {obj.get_weight()}'''
# row['recived'] = f'''<td class="is_completed" data="{obj.recived}">
# </td>'''
# print(row['recived'])
row['Add GRN'] = f'''<td class="">
<a href="/indent/{obj.pk}/grn/form/" target="_blank">
<img src="../../../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
</a>
</td>'''
if obj.locked:
row['Edit'] = f'''<td class="border-0">
<a data-id="{obj.pk}" onclick="edit_locked('/wo/{obj.WO.pk}/indent/form/{obj.pk}')"><img src="../../../../../static/Images/lock.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
else:
row['Edit'] = f'''<td class="border-0">
<a href="/wo/{obj.WO.pk}/indent/form/{obj.pk}"><img src="../../../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
<input class="form-check-input del_input" type="checkbox"
name="del" value="{obj.pk}" input_name="{obj}">
</div>'''
return
def render_row_details(self, pk, request=None):
obj = self.model.objects.get(pk=pk)
# fields = [f for f in self.model._meta.get_fields() if f.concrete]
fields = {
"Recived": obj.recived_quantity,
'Material Type':obj.material_type,
"Size":obj.size,
"Thickness":obj.thickness,
"Width":obj.width,
"Internal Diameter":obj.internal_diameter,
'Tax':str(obj.tax)+"%",
"Comment":obj.comment,
"Has PO": True if obj.PO else False,
}
currency={
'Discount':obj.discount,
'Other Expenses':obj.other_expanses,
}
fields = {k: v for k, v in fields.items() if v is not None and v != ""}
# print(student_details.Division_id.Semester_id)
html = '<table class="table-bordered" style="width:60%">'
if obj.PO:
html += '<tr><td class="">PO Number</td><td class=""><a href = "/po/table/">%s</a></td></tr>' % (obj.PO)
for key in fields:
html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
for key in currency:
html += '<tr><td class="">%s</td><td class="currency">%s</td></tr>' % (key, currency[key])
html += '</table>'
return html
class indent_table_page(View):
template_name = "indent/indent_table.html"
def get(self, request,wo_id):
wo = work_order.objects.filter(pk=wo_id).first()
context= {
"update":[],
'all_indents': indent.objects.all().filter(WO__id=wo_id),
'wo':wo,
'wo_id':wo_id,
}
return render(request,self.template_name,context)
def post(self, request,wo_id):
pks = request.POST.getlist("pks[]")
for i in pks:
obj = indent.objects.filter(pk=i).first()
# obj.quantity=3
# obj.save()
obj.delete()
return JsonResponse({"deleted":True})
class indent_form(View):
template_name = "indent/indent_form.html"
def get(self, request,wo_id=None,indent_id=None):
self.context= {
"update":[],
'all_indents': indent.objects.all(),
"all_item_description":item_description.objects.all(),
'wo':work_order.objects.get(pk=wo_id),
}
if indent_id:
instance = indent.objects.get(pk=indent_id)
print(indent_id,"here in Update")
self.context['update'] = instance
self.context['success'] = False
return render(request,self.template_name,self.context)
else:
self.context['update'] = []
print(self.context['update'],"here in add")
return render(request,self.template_name,self.context)
def post(self, request,wo_id=None,indent_id=None):
wo = work_order.objects.filter(pk=wo_id).first()
self.context= {
"update":[],
'all_indents': indent.objects.all(),
"all_item_description":item_description.objects.all(),
'wo':wo,
}
tempdict = self.request.POST.copy()
tempdict['value'] = tempdict['value'].replace(',', '').replace("₹","")
if indent_id:
instance = indent.objects.get(pk=indent_id)
form = add_indent(tempdict,instance=instance)
if not wo:
wo = instance.WO
else:
form = add_indent(tempdict)
if form.is_valid():
temp = form.save(commit=False)
temp.WO = wo
item_desc,_=item_description.objects.get_or_create(
description=tempdict.get("item_description")
)
print(item_desc,_)
temp.item_description = item_desc
self.context['update'] = form.instance
self.context['success'] = True
print(temp.item_description)
temp.save()
else:
self.context['errors'] = form.errors.as_ul()
print(form.errors)
# self.context['update'] = form.instance
return render(request,self.template_name,self.context)
class all_indents_datatable(AjaxDatatableView):
model = indent
title = 'Indent'
length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
initial_order = [["WO","asc"]]
search_values_separator = " "
column_defs = [
AjaxDatatableView.render_row_tools_column_def(),
{
'name': 'pk',
'visible': True,
'searchable': False,
'orderable': True,
'title': 'Indent No.',
}, # pk
{
'name': 'WO',
'foreign_field': 'WO__wo_number',
'visible': True,
'searchable': True,
'placeholder':'WO'
}, # WO
{
'name': 'PO',
'foreign_field': 'PO__po_number',
'visible': True,
'searchable': True,
'placeholder':'PO'
}, # PO
{
'name': 'material_shape',
'visible': True,
'searchable': True,
'orderable': True,
'title': 'Shape',
}, # material_shape
{
'name': 'Description',
'foreign_field': 'item_description__description',
'visible': True,
'searchable': True,
'placeholder':'description'
}, # Description
{
'name': 'weight',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Weight',
}, # weight
{
'name': 'size',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Size',
}, # size
{
'name': 'thickness',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'THK',
}, # thickness
{
'name': 'quantity',
'visible': True,
'searchable': False,
'orderable': False,
'title': 'Qty',
}, # quantity
{
'name': 'net_value',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Net Val',
'className': 'currency',
}, # net_value
{'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
]
def get_initial_queryset(self, request=None):
wo_id=request.REQUEST.get('wo_id')
queryset = self.model.objects.all()
queryset = queryset.filter(recived=False)
# queryset = self.model.objects.all()
return queryset
def customize_row(self, row, obj):
# 'row' is a dictionary representing the current row, and 'obj' is the current object.
get_str = lambda x: x if x else "--"
row['net_value'] = f''' {obj.net_value()}'''
row['size'] = get_str(obj.size)
row['PO'] = get_str(obj.PO.po_number) if obj.PO else "----"
row['thickness'] = get_str(obj.thickness)
row['WO'] = f'<a href="/wo/{obj.WO.pk}/indent/table/">{obj.WO}</a>'
row['weight'] = f''' {obj.get_weight()}'''
if obj.locked:
row['Edit'] = f'''<td class="border-0">
<a data-id="{obj.pk}" onclick="edit_locked('/wo/{obj.WO.pk}/indent/form/{obj.pk}')"><img src="../../../../../static/Images/lock.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
else:
row['Edit'] = f'''<td class="border-0">
<a href="/wo/{obj.WO.pk}/indent/form/{obj.pk}"><img src="../../../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
return
def render_row_details(self, pk, request=None):
obj = self.model.objects.get(pk=pk)
# fields = [f for f in self.model._meta.get_fields() if f.concrete]
fields = {
"Recived": obj.recived_quantity,
'Material Type':obj.material_type,
'Item Description':obj.item_description,
"Size":obj.size,
"Thickness":obj.thickness,
"Width":obj.width,
"Internal Diameter":obj.internal_diameter,
'Description':obj.description,
'Tax':str(obj.tax)+"%",
"Comment":obj.comment,
"Has PO": True if obj.PO else False,
}
currency={
'Value':obj.value,
'Discount':obj.discount,
'Other Expenses':obj.other_expanses,
}
fields = {k: v for k, v in fields.items() if v is not None and v != ""}
# print(student_details.Division_id.Semester_id)
html = '<table class="table-bordered" style="width:60%">'
if obj.PO:
html += '<tr><td class="">PO Number</td><td class=""><a href = "/po/table/">%s</a></td></tr>' % (obj.PO)
for key in fields:
html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
for key in currency:
html += '<tr><td class="">%s</td><td class="currency">%s</td></tr>' % (key, currency[key])
html += '</table>'
return html
class all_indent_table(View):
template_name = 'indent/all_indent.html'
def get(self, request):
return render(request,self.template_name)
#endregion
#region ########### Purchase Order ###########
class PO_datatable(AjaxDatatableView):
model = purchase_order
title = 'Purchase Order'
length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
initial_order = [["po_number","asc"]]
search_values_separator = " "
column_defs = [
AjaxDatatableView.render_row_tools_column_def(),
{
'name': 'id',
'visible': False,
'searchable': False,
},
{
'name': 'po_number',
'visible': True,
'searchable': True,
'orderable': True,
'title': 'PO Number',
}, # po number
{
'name': 'Vendor',
'foreign_field': 'vendor_id__vendor_name',
'visible': True,
'searchable': True,
'placeholder':'Vendor'
}, # vendor
{
'name': 'remaining_quantity',
'visible': True,
'orderable': True,
'searchable': False,
'title': 'Remaining Quantity',
}, # quantity
{
'name': 'po_date',
'visible': True,
'searchable': False,
'orderable': True,
'title': 'PO Date',
}, # po date
{
'name': 'net_value',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Net Value',
'className': 'currency',
},# net_value
{
'name': 'is_complete',
'visible': True,
'orderable': True,
'searchable': False,
'title': 'Completed',
'className':"is_completed",
},
{'name': 'Print', 'visible': True,'searchable': False, 'orderable': False},
{'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
{
'name':'Delete',
'visible': True,
'searchable': False,
'orderable': False
}, # delete field
]
def customize_row(self, row, obj):
# 'row' is a dictionary representing the current row, and 'obj' is the current object.
net_value = 0
total_quantity,remaining_quantity = 0,0
for ind in obj.indent_set.all():
net_value += ind.net_value()
remaining_quantity += ind.get_remaining_quantity()
total_quantity += ind.quantity
row['po_date'] = obj.get_date()
row['net_value'] = f'{round(net_value,2)}'
row["remaining_quantity"] = f'{int(remaining_quantity)} out of {int(total_quantity)}'
row['Print'] = f'''<td class="">
<a href="../report_input/{obj.pk}" >
<img src="../../../static/Images/print.png" style="width:17px;height:17px" alt="print"></a>
</td>'''
row['Edit'] = f'''<td class="">
<a href="../form/{obj.pk}" >
<img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
row['Indent List'] = f'''<td class="">
<a href="indent/table/{obj.pk}" >
<img src="../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
</a>
</td>'''
row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
<input class="form-check-input del_input" type="checkbox"
name="del" value="{obj.pk}" input_name="{obj}">
</div>'''
return
def render_row_details(self, pk, request=None):
obj = self.model.objects.get(pk=pk)
# fields = [f for f in self.model._meta.get_fields() if f.concrete]
fields = {
}
fields = {k: v for k, v in fields.items() if v is not None and v != ""}
indent_list_html = '<table class="table-bordered" style="width:100%">'
indent_list_html += f'<tr><th class="d-flex justify-content-center">Indent</th><th class="">Balance</th></tr>'
for ind in obj.indent_set.all():
dimensions = f"{ind.size} X {ind.thickness} X {ind.width} X {ind.internal_diameter}".replace(" X None","").replace("None","")
indent_list_html += f'<tr><td class="d-flex justify-content-left">{ind.pk} -- <a href="/wo/{ind.WO.pk}/indent/table" >{ind.WO}</a> [{ind.item_description} ({dimensions})]</td><td class="">  {ind.get_remaining_quantity()} out of {int(ind.quantity)}</td></tr>'
indent_list_html += '</table>'
# print(student_details.Division_id.Semester_id)
html = '<table class="table-bordered" style="width:80%">'
for key in fields:
html += '<tr><td class="">%s</td><td class="">%s</td></tr>' % (key, fields[key])
html += '<tr><td class="">Indent List</td><td class="m-0 p-0">%s</td></tr>' % (indent_list_html)
html += '</table>'
return html
def update_indent_PO(indent_list,PO):
'Link the selected indents to the given PO and unlink indents that were removed from the selection.'
my_indents = PO.indent_set.all()
new_indents = set(indent.objects.all().filter(pk__in = indent_list))
old_indents = set(my_indents)
to_be_deleted = old_indents.difference(new_indents)
to_be_saved = new_indents.difference(old_indents)
for i in to_be_deleted:
i.PO = None
i.save()
for i in to_be_saved:
i.PO = PO
i.save()
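# Hedged illustration of the set logic in update_indent_PO above (plain
# Python, no database involved): if the PO previously covered indents
# {1, 2, 3} and the submitted list is [2, 3, 4], then
#   old - new == {1}  -> indent 1 gets its PO cleared,
#   new - old == {4}  -> indent 4 gets the PO assigned,
# and indents 2 and 3 are left untouched.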
class PO_form(View):
template_name = "po/PO_form.html"
def get(self, request,po_id=None):
self.context= {
"update":[],
'all_vendors':vendor_details.objects.all(),
'all_indent':list(indent.objects.all().filter(PO=None).order_by("WO")),
}
if po_id:
instance = purchase_order.objects.get(pk=po_id)
my_indents = instance.indent_set.all()
self.context['update'] = instance
self.context['indent_list'] = my_indents
self.context['all_indent'] += list(my_indents)
self.context['success'] = False
return render(request,self.template_name,self.context)
def post(self, request,po_id=None):
self.context= {
"update":[],
'all_vendors':vendor_details.objects.all(),
'all_indent':list(indent.objects.all().filter(PO=None).order_by("WO")),
}
if po_id:
instance = purchase_order.objects.get(pk=po_id)
form = update_PO(request.POST,instance=instance)
else:
form = add_PO(request.POST)
if form.is_valid():
a = form.save()
print(a)
indent_list = request.POST.getlist('indent_list')
update_indent_PO(indent_list,a)
my_indents = a.indent_set.all()
self.context['update'] = form.instance
self.context['indent_list'] = my_indents
self.context['all_indent'] += list(my_indents)
self.context['all_indent'] = set(self.context['all_indent'])
self.context['success'] = True
else:
self.context['errors'] = form.errors.as_ul()
print(form.errors)
# self.context['update'] = form.instance
return render(request,self.template_name,self.context)
class PO_table(View):
template_name = "po/PO_table.html"
def get(self, request):
context= {
"update":[],
'all_PO': purchase_order.objects.all()
}
return render(request,self.template_name,context)
def post(self, request):
pass
def po_print_inputs(request,po_id):
my_po = purchase_order.objects.get(pk=po_id)
context = {'my_po':my_po}
return render(request,"po/po_print_input.html",context)
def print_report(request,po_id):
my_po = purchase_order.objects.get(pk=po_id)
my_indents = indent.objects.all().filter(PO=my_po)
total_gross_value,total_net_value,total_quantity,total_tax_value,total_weight = 0,0,0,0,0
for my_indent in my_indents:
total_net_value += my_indent.net_value()
total_quantity += my_indent.quantity
total_tax_value += my_indent.tax_amount()
total_weight += my_indent.get_weight()
total_gross_value += my_indent.gross_value()
delivery_day = request.GET['delivery_day']
payment_term = request.GET['payment_term']
freight_charges = request.GET['freight_charges']
com_name = request.GET['com_name']
# print( request.GET)
context = {
"my_po":my_po,
"all_indents":my_indents,
"total_net_value":round(total_net_value,2),
"total_quantity":round(total_quantity,2),
"total_tax_value":round(total_tax_value,2),
"total_weight":round(total_weight,3),
"total_gross_value":round(total_gross_value,2),
"delivery_day":delivery_day,
"payment_term":payment_term,
"freight_charges":freight_charges,
"com_name":com_name,
}
# print(context['total_net_value'])
# total_quantity = indent.objects.all()
return render(request,"po/report.html",context)
def lock_po_indent(request,po_id):
my_po = purchase_order.objects.get(pk=po_id)
my_indents = indent.objects.all().filter(PO=my_po)
for my_indent in my_indents:
my_indent.locked = True
# print(my_indent.locked)
my_indent.save()
return JsonResponse({"done":True})
#endregion
#region ########### Work-Order ###########
def show_stock(request):
stock_wo = work_order.objects.all().get(wo_number="STOCK")
return redirect(f"/wo/{stock_wo.pk}/indent/table/")
class WO_datatable(AjaxDatatableView):
model = work_order
title = 'work_order'
length_menu = [[-1,25, 50, 100], ['all',25, 50, 100]]
initial_order = [["is_complete","asc"]]
search_values_separator = " "
column_defs = [
AjaxDatatableView.render_row_tools_column_def(),
{
'name': 'id',
'visible': False,
'searchable': False,
},
{
'name': 'wo_number',
'visible': True,
'searchable': True,
'orderable': True,
'title': 'WO Number',
}, # wo_number
{
'name': 'description',
'visible': True,
'searchable': True,
'orderable': True,
'title': 'Description',
}, # description
{
'name': 'quantity',
'visible': True,
'orderable': True,
'searchable': False,
'title': 'Quantity',
}, # quantity
{
'name': 'net_value',
'visible': True,
'orderable': False,
'searchable': False,
'title': 'Net Value',
'className': 'currency',
}, # net_value
{
'name': 'is_complete',
'visible': True,
'orderable': True,
'searchable': False,
'title': 'Completed',
'className':"is_completed",
}, # is_complete
{'name': 'Indent List', 'visible': True,'searchable': False, 'orderable': False},
{'name': 'Print', 'visible': True,'searchable': False, 'orderable': False},
{'name': 'Edit', 'visible': True,'searchable': False, 'orderable': False},
{
'name':'Delete',
'visible': True,
'searchable': False,
'orderable': False
}, # delete field
]
def get_initial_queryset(self, request=None):
# po_id=request.REQUEST.get('po_id')
queryset = self.model.objects.all().exclude(wo_number="STOCK")
# queryset = queryset.filter(PO__id=po_id)
# queryset = self.model.objects.all()
return queryset
def customize_row(self, row, obj):
# 'row' is a dictionary representing the current row, and 'obj' is the current object.
row['net_value'] = f''' {obj.net_value()}'''
row['Print'] = f'''<td class="">
<a href="../../wo/print_indents/{obj.pk}/" >
<img src="../../../static/Images/print.png" style="width:17px;height:17px" alt="print"></a>
</td>'''
row['Edit'] = f'''<td class="">
<a href="../form/{obj.pk}" >
<img src="../../../static/Images/editing.png" style="width:17px;height:17px" alt="edit"></a>
</td>'''
row['Indent List'] = f'''<td class="">
<a href="/wo/{obj.pk}/indent/table/" >
<img src="../../static/Images/enter.png" style="width:17px;height:17px" alt="enter">
</a>
</td>'''
row['Delete'] =f'''<div class="form-check" onclick="checkSelected()">
<input class="form-check-input del_input" type="checkbox"
name="del" value="{obj.pk}" input_name="{obj}">
</div>'''
return
def render_row_details(self, pk, request=None):
obj = self.model.objects.get(pk=pk)
# fields = [f | |
"""
Routines to load a corpus and perform the necessary pre-processing on the audio files and labels.
Contains helper methods to load audio files, too.
"""
import csv
import os
import random
import numpy as np
import python_speech_features as psf
import tensorflow as tf
from scipy.io import wavfile
from asr.dataset.config import CORPUS_DIR
from asr.dataset.config import CSV_DELIMITER, CSV_FIELDNAMES, CSV_HEADER_LABEL, CSV_HEADER_PATH
from asr.dataset.csv_helper import get_bucket_boundaries
from asr.labels import ctoi
from asr.params import NP_FLOAT, WIN_LENGTH, WIN_STEP, NUM_FEATURES, FLAGS
def input_fn_generator(target):
"""
Generate the `input_fn` for the TensorFlow estimator.
Args:
target (str): The type of input, this affects the used CSV file, batching method and epochs.
Supported targets are:
* 'train_bucket': Creates 1 epoch of training data, using bucketing.
Examples are shuffled.
* 'train_batch': Creates 1 epoch of training data, using batches.
Examples are in the order of the `train.csv` file.
* 'dev': Creates 1 epoch of evaluation data from the `dev.csv` file.
Uses buckets. Examples are shuffled.
* 'test': Creates 1 epoch of evaluation data from the `test.csv` file.
Uses buckets. Examples are shuffled.
Returns:
function: Input function pointer.
"""
if target == 'train_bucket':
csv_path = FLAGS.train_csv
use_buckets = True
epochs = 1
elif target == 'train_batch':
csv_path = FLAGS.train_csv
use_buckets = False
epochs = 1
elif target == 'dev':
csv_path = FLAGS.dev_csv
use_buckets = True
epochs = 1
elif target == 'test':
csv_path = FLAGS.test_csv
use_buckets = True
epochs = 1
else:
raise ValueError('Invalid target: "{}"'.format(target))
# Read bucket boundaries from CSV file.
if use_buckets:
bucket_boundaries = get_bucket_boundaries(csv_path, FLAGS.num_buckets)
tf.logging.info('Using {} buckets for the {} set.'.format(len(bucket_boundaries), target))
def input_fn():
# L8ER: Try out the following two (not working as of TF v1.12):
# https://www.tensorflow.org/api_docs/python/tf/data/experimental/latency_stats
# https://www.tensorflow.org/api_docs/python/tf/data/experimental/StatsAggregator
def element_length_fn(_spectrogram, _spectrogram_length, _label_encoded, _label_plaintext):
del _spectrogram
del _label_encoded
del _label_plaintext
return _spectrogram_length
assert os.path.exists(csv_path) and os.path.isfile(csv_path)
with tf.device('/cpu:0'):
dataset = tf.data.Dataset.from_generator(
__input_generator,
(tf.float32, tf.int32, tf.int32, tf.string),
(tf.TensorShape([None, 80]), tf.TensorShape([]),
tf.TensorShape([None]), tf.TensorShape([])),
args=[csv_path, use_buckets])
if use_buckets:
# Set shuffle buffer to an arbitrary size to ensure good enough shuffling.
# At the moment, most shuffling is done by the `__input_generator` function.
# Also see: https://stackoverflow.com/a/47025850/2785397
dataset = dataset.shuffle(FLAGS.shuffle_buffer_size)
dataset = dataset.apply(
tf.data.experimental.bucket_by_sequence_length(
element_length_func=element_length_fn,
bucket_boundaries=bucket_boundaries,
bucket_batch_sizes=[FLAGS.batch_size] * (len(bucket_boundaries) + 1),
pad_to_bucket_boundary=False, # False => pad to longest example in batch
no_padding=False
)
)
else:
dataset = dataset.padded_batch(batch_size=FLAGS.batch_size,
padded_shapes=([None, 80], [], [None], []),
drop_remainder=True)
# dataset.cache()
dataset = dataset.prefetch(64)
# Number of epochs.
dataset = dataset.repeat(epochs)
iterator = dataset.make_one_shot_iterator()
spectrogram, spectrogram_length, label_encoded, label_plaintext = iterator.get_next()
features = {
'spectrogram': spectrogram,
'spectrogram_length': spectrogram_length,
'label_plaintext': label_plaintext
}
return features, label_encoded
return input_fn
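# Hedged usage sketch (not part of the original module): how the generated
# `input_fn` would typically be consumed by a `tf.estimator.Estimator`.
# `my_model_fn` and the model directory are placeholders for illustration only.
def _example_estimator_usage():
    def my_model_fn(features, labels, mode):
        # A real model_fn would build the network and return an EstimatorSpec.
        raise NotImplementedError

    estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir='/tmp/asr_model')
    estimator.train(input_fn=input_fn_generator('train_bucket'))
    estimator.evaluate(input_fn=input_fn_generator('dev'))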
def __input_generator(*args):
assert len(args) == 2, '__input_generator() arguments are a path and shuffle boolean.'
assert isinstance(args[0], bytes)
assert isinstance(args[1], np.bool_)
csv_path = str(args[0], 'utf-8')
shuffle = bool(args[1])
with open(csv_path, 'r', encoding='utf-8') as file_handle:
reader = csv.DictReader(file_handle, delimiter=CSV_DELIMITER, fieldnames=CSV_FIELDNAMES)
lines = list(reader)[1: -1] # Remove CSV header and final blank line.
# Shuffle the CSV lines.
if shuffle:
random.shuffle(lines)
# Read the CSV lines and extract spectrogram and label for each line.
for line in lines:
path = line[CSV_HEADER_PATH]
label = line[CSV_HEADER_LABEL]
path = os.path.join(CORPUS_DIR, path)
# Convert the WAV file into a spectrogram and get its length.
spectrogram, spectrogram_length = load_sample(path)
# Convert character sequence label to integer sequence.
label_encoded = [ctoi(c) for c in label]
yield spectrogram, spectrogram_length, label_encoded, label
def load_sample(file_path, feature_type=None, feature_normalization=None):
"""
Loads the wave file and converts it into feature vectors.
Args:
file_path (str or bytes):
A TensorFlow queue of file names to read from.
`tf.py_func` converts the provided Tensor into `np.ndarray`s bytes.
feature_type (str): Optional.
If `None` is provided, use `FLAGS.feature_type`.
Type of features to generate. Options are 'mel' and 'mfcc'.
feature_normalization (str): Optional.
If `None` is provided, use `FLAGS.feature_normalization`.
Whether to normalize the generated features with the stated method or not.
Please consult `sample_normalization` for a complete list of normalization methods.
'local': Use local (in sample) mean and standard deviation values, and apply the
normalization element wise, like in `global`.
'local_scalar': Uses only the mean and standard deviation of the current sample.
The normalization is being applied by ([sample] - mean_scalar) / std_scalar
'none': No normalization is being applied.
Returns:
Tuple[np.ndarray, np.ndarray]:
2D array with [time, num_features] shape, containing `NP_FLOAT`.
Array containing a single int32.
"""
__supported_feature_types = ['mel', 'mfcc']
__supported_feature_normalizations = ['none', 'local', 'local_scalar']
feature_type = feature_type if feature_type is not None else FLAGS.feature_type
feature_normalization = feature_normalization if feature_normalization is not None \
else FLAGS.feature_normalization
if feature_type not in __supported_feature_types:
raise ValueError('Requested feature type of {} isn\'t supported.'
.format(feature_type))
if feature_normalization not in __supported_feature_normalizations:
raise ValueError('Requested feature normalization method {} is invalid.'
.format(feature_normalization))
if type(file_path) is not str:
file_path = str(file_path, 'utf-8')
if not os.path.isfile(file_path):
raise ValueError('"{}" does not exist.'.format(file_path))
# Load the audio files sample rate and data.
(sampling_rate, audio_data) = wavfile.read(file_path)
if len(audio_data) < 401:
raise RuntimeError('Sample length {:,d} too short: {}'.format(len(audio_data), file_path))
if not sampling_rate == FLAGS.sampling_rate:
raise RuntimeError('Sampling rate is {:,d}, expected {:,d}.'
.format(sampling_rate, FLAGS.sampling_rate))
# At 16000 Hz, 512 samples ~= 32 ms. At 16000 Hz, 200 samples = 12.5 ms. 16 samples = 1 ms @ 16 kHz.
f_max = sampling_rate / 2. # Maximum frequency (Nyquist rate).
f_min = 64. # Minimum frequency.
n_fft = 1024 # Number of samples in a frame.
if feature_type == 'mfcc':
sample = __mfcc(
audio_data, sampling_rate, WIN_LENGTH, WIN_STEP, NUM_FEATURES, n_fft, f_min, f_max
)
elif feature_type == 'mel':
sample = __mel(
audio_data, sampling_rate, WIN_LENGTH, WIN_STEP, NUM_FEATURES, n_fft, f_min, f_max
)
else:
raise ValueError('Unsupported feature type')
# Make sure that data type matches TensorFlow type.
sample = sample.astype(NP_FLOAT)
# Drop every 2nd time frame, if requested.
if FLAGS.features_drop_every_second_frame:
# [time, NUM_FEATURES] => [time // 2, NUM_FEATURES]
sample = sample[:: 2, :]
# Get length of the sample.
sample_len = np.array(sample.shape[0], dtype=np.int32)
# Apply feature normalization.
sample = __feature_normalization(sample, feature_normalization)
# sample = [time, NUM_FEATURES], sample_len: scalar
return sample, sample_len
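# Hedged usage sketch: loading a single WAV file into normalized feature
# vectors. The path is a placeholder; any mono WAV recorded at
# FLAGS.sampling_rate and longer than 400 samples would do.
def _example_load_sample(wav_path='/tmp/example.wav'):
    features, length = load_sample(wav_path, feature_type='mel',
                                   feature_normalization='local')
    # features: [time, NUM_FEATURES] array of NP_FLOAT, length: scalar int32
    return features.shape, int(length)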
def __mfcc(audio_data, sampling_rate, win_len, win_step, num_features, n_fft, f_min, f_max):
"""
Convert a wav signal into Mel Frequency Cepstral Coefficients (MFCC).
Args:
audio_data (np.ndarray): Wav signal.
sampling_rate (int): Sampling rate.
win_len (float): Window length in seconds.
win_step (float): Window stride in seconds.
num_features (int): Number of features to generate.
n_fft (int): Number of Fast Fourier Transforms.
f_min (float): Minimum frequency to consider.
f_max (float): Maximum frequency to consider.
Returns:
np.ndarray: MFCC feature vectors. Shape: [time, num_features]
"""
if num_features % 2 != 0:
raise ValueError('num_features is not a multiple of 2.')
# Compute MFCC features.
mfcc = psf.mfcc(signal=audio_data, samplerate=sampling_rate, winlen=win_len, winstep=win_step,
numcep=num_features // 2, nfilt=num_features, nfft=n_fft,
lowfreq=f_min, highfreq=f_max,
preemph=0.97, ceplifter=22, appendEnergy=True)
# And the first-order differences (delta features).
mfcc_delta = psf.delta(mfcc, 2)
# Combine MFCC with MFCC_delta
return np.concatenate([mfcc, mfcc_delta], axis=1)
def __mel(audio_data, sampling_rate, win_len, win_step, num_features, n_fft, f_min, f_max):
"""
Convert a wav signal into a logarithmically scaled mel filterbank.
Args:
audio_data (np.ndarray): Wav signal.
sampling_rate (int): Sampling rate.
win_len (float): Window length in seconds.
win_step (float): Window stride in seconds.
num_features (int): Number of features to generate.
n_fft (int): Number of Fast Fourier Transforms.
f_min (float): Minimum frequency to consider.
f_max (float): Maximum frequency to consider.
Returns:
np.ndarray: Mel-filterbank. Shape: [time, num_features]
"""
mel = psf.logfbank(signal=audio_data, samplerate=sampling_rate, winlen=win_len,
winstep=win_step, nfilt=num_features, nfft=n_fft,
lowfreq=f_min, highfreq=f_max, preemph=0.97)
return mel
def __feature_normalization(features, method):
"""
Normalize the given feature vector `y`, with the stated normalization `method`.
Args:
features (np.ndarray):
The signal array
method (str):
Normalization method:
'local': Use local (in sample) mean and standard deviation values, and apply the
normalization element wise, like in `global`.
'local_scalar': Uses only the mean and standard deviation of the current sample.
The normalization is being applied by ([sample] - mean_scalar) / std_scalar
'none': No normalization is being applied.
Returns:
np.ndarray: The normalized feature vector.
"""
if method == 'none':
return features
if method == 'local':
return (features - np.mean(features, axis=0)) / np.std(features, axis=0)
if method == 'local_scalar':
# 'local_scalar' uses the scalar mean and standard deviation of the whole sample.
return (features - np.mean(features)) / np.std(features)
raise ValueError('Invalid normalization method.')
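# Hedged worked example for the 'local' normalization above: each feature
# column is centred and scaled by its own per-sample mean and standard
# deviation, so every column of the result has zero mean and unit variance.
def _example_local_normalization():
    demo = np.array([[1.0, 10.0], [3.0, 30.0]], dtype=NP_FLOAT)
    # Column means are [2, 20] and standard deviations [1, 10], so the
    # normalized sample is [[-1, -1], [1, 1]].
    return __feature_normalization(demo, 'local')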
# Create a dataset for testing purposes.
if __name__ == '__main__':
__NEXT_ELEMENT = input_fn_generator('train_bucket')
with tf.Session() as session:
# for example in range(FLAGS.num_examples_train):
for example in range(5):
| |
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import abc
import copy
import math
import warnings
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast
import typepy
from dataproperty import (
ColumnDataProperty,
DataProperty,
DataPropertyExtractor,
Format,
MatrixFormatting,
Preprocessor,
)
from dataproperty.typing import TransFunc
from tabledata import TableData, convert_idx_to_alphabet, to_value_matrix
from typepy import String, Typecode, extract_typepy_from_dtype
from .._logger import WriterLogger
from ..error import EmptyTableDataError, EmptyTableNameError, EmptyValueError, NotSupportedError
from ..style import Align, Cell, NullStyler, Style, StylerInterface, ThousandSeparator
from ..style._theme import ColSeparatorStyleFilterFunc, StyleFilterFunc, fetch_theme
from ..typehint import Integer, TypeHint
from ._interface import TableWriterInterface
from ._msgfy import to_error_message
_ts_to_flag = {
ThousandSeparator.NONE: Format.NONE,
ThousandSeparator.COMMA: Format.THOUSAND_SEPARATOR,
ThousandSeparator.SPACE: Format.THOUSAND_SEPARATOR,
ThousandSeparator.UNDERSCORE: Format.THOUSAND_SEPARATOR,
}
class AbstractTableWriter(TableWriterInterface, metaclass=abc.ABCMeta):
"""
An abstract base class of table writer classes.
Args:
max_precision (int): Maximum decimal places for real number values.
dequote (bool): If |True|, dequote values in :py:attr:`~.value_matrix`.
.. py:attribute:: stream
Stream to write tables.
You can use any stream that supports the ``write`` method,
such as ``sys.stdout``, a file stream, ``StringIO``, and so forth.
Defaults to ``sys.stdout``.
:Example:
:ref:`example-configure-stream`
.. py:attribute:: is_write_header
:type: bool
Write headers of a table if the value is |True|.
.. py:attribute:: is_padding
:type: bool
Padding for each item in the table if the value is |True|.
.. py:attribute:: iteration_length
:type: int
The number of iterations to write a table.
This value used in :py:meth:`.write_table_iter` method.
(defaults to ``-1`` which means number of iterations is indefinite)
.. py:attribute:: style_filter_kwargs
:type: Dict[str, Any]
Extra keyword arguments for style filter functions.
These arguments will passing to filter functions added by
:py:meth:`.add_style_filter` or :py:meth:`.add_col_separator_style_filter`
.. py:attribute:: colorize_terminal
:type: bool
:value: True
[Only for text format writers] [experimental]
If |True|, colorize text outputs with |Style|.
.. py:attribute:: enable_ansi_escape
:type: bool
:value: True
[Only for text format writers]
If |True|, applies ANSI escape sequences to terminal's text outputs
with |Style|.
.. py:attribute:: write_callback
The value is expected to be a function.
The function is called each time an iteration of writing a table
completes. (defaults to |None|)
An example callback function definition is as follows:
.. code:: python
def callback_example(iter_count, iter_length):
print("{:d}/{:d}".format(iter_count, iter_length))
Arguments passed to the callback are:
- first argument: current iteration number (start from ``1``)
- second argument: a total number of iteration
"""
@property
def margin(self) -> int:
raise NotImplementedError()
@margin.setter
def margin(self, value: int) -> None:
raise NotImplementedError()
@property
def header_list(self):
warnings.warn("'header_list' has moved to 'headers'", DeprecationWarning)
return self.headers
@header_list.setter
def header_list(self, value):
warnings.warn("'header_list' has moved to 'headers'", DeprecationWarning)
self.headers = value
@property
def value_matrix(self) -> Sequence:
"""Data of a table to be outputted."""
return self.__value_matrix_org
@value_matrix.setter
def value_matrix(self, value_matrix: Sequence) -> None:
self.__set_value_matrix(value_matrix)
self.__clear_preprocess()
@property
def table_format(self):
"""Get the format of the writer.
Returns:
TableFormat:
"""
from .._table_format import TableFormat
return TableFormat.from_name(self.format_name)
@property
def type_hint_list(self):
warnings.warn("'type_hint_list' has moved to 'type_hints'", DeprecationWarning)
return self.type_hints
@type_hint_list.setter
def type_hint_list(self, value):
warnings.warn("'type_hint_list' has moved to 'type_hints'", DeprecationWarning)
self.type_hints = value
@property
def styles(self):
warnings.warn("'styles' has moved to 'column_styles'", DeprecationWarning)
return self.column_styles
@styles.setter
def styles(self, value):
warnings.warn("'styles' has moved to 'column_styles'", DeprecationWarning)
self.column_styles = value
@property
def style_list(self):
warnings.warn("'style_list' has moved to 'column_styles'", DeprecationWarning)
return self.column_styles
@style_list.setter
def style_list(self, value):
warnings.warn("'style_list' has moved to 'column_styles'", DeprecationWarning)
self.column_styles = value
@property
def value_preprocessor(self):
return self._dp_extractor.preprocessor
@value_preprocessor.setter
def value_preprocessor(self, value):
warnings.warn(
"this setter will be deleted in the future. use update_preprocessor instead",
DeprecationWarning,
)
if self._dp_extractor.preprocessor == value:
return
self._dp_extractor.preprocessor = value
self.__clear_preprocess()
@property
def stream(self):
return self._stream
@stream.setter
def stream(self, value) -> None:
self._stream = value
@abc.abstractmethod
def _write_table(self, **kwargs) -> None:
pass
def __init__(self, **kwargs) -> None:
self._logger = WriterLogger(self)
self.table_name = kwargs.get("table_name", "")
self.value_matrix = kwargs.get("value_matrix", [])
self.is_write_header = kwargs.get("is_write_header", True)
self.is_write_header_separator_row = kwargs.get("is_write_header_separator_row", True)
self.is_write_value_separator_row = kwargs.get("is_write_value_separator_row", False)
self.is_write_opening_row = kwargs.get("is_write_opening_row", False)
self.is_write_closing_row = kwargs.get("is_write_closing_row", False)
self._use_default_header = False
self._dp_extractor = DataPropertyExtractor(max_precision=kwargs.get("max_precision"))
self._dp_extractor.min_column_width = 1
self._dp_extractor.strip_str_header = '"'
self._dp_extractor.preprocessor = Preprocessor(dequote=kwargs.get("dequote", True))
self._dp_extractor.type_value_map[Typecode.NONE] = ""
self._dp_extractor.matrix_formatting = MatrixFormatting.HEADER_ALIGNED
self._dp_extractor.update_strict_level_map({Typecode.BOOL: 1})
self.is_formatting_float = kwargs.get("is_formatting_float", True)
self.is_padding = kwargs.get("is_padding", True)
self.headers = kwargs.get("headers", [])
self.type_hints = kwargs.get("type_hints", [])
self._quoting_flags = {
Typecode.BOOL: False,
Typecode.DATETIME: True,
Typecode.DICTIONARY: False,
Typecode.INFINITY: False,
Typecode.INTEGER: False,
Typecode.IP_ADDRESS: True,
Typecode.LIST: False,
Typecode.NAN: False,
Typecode.NONE: False,
Typecode.NULL_STRING: True,
Typecode.REAL_NUMBER: False,
Typecode.STRING: True,
}
self._is_require_table_name = False
self._is_require_header = False
self.iteration_length = kwargs.get("iteration_length", -1)
self.write_callback = kwargs.get(
"write_callback", lambda _iter_count, _iter_length: None # defaults to NOP callback
)
self._iter_count: Optional[int] = None
self.__default_style: Style
self.default_style = kwargs.get("default_style", Style())
self.__col_style_list: List[Optional[Style]] = []
self.column_styles = kwargs.get("column_styles", [])
self._style_filters: List[StyleFilterFunc] = []
self._styler = self._create_styler(self)
self.style_filter_kwargs: Dict[str, Any] = kwargs.get("style_filter_kwargs", {})
self.__colorize_terminal = kwargs.get("colorize_terminal", True)
self.__enable_ansi_escape = kwargs.get("enable_ansi_escape", True)
self.max_workers = kwargs.get("max_workers", 1)
if "dataframe" in kwargs:
self.from_dataframe(kwargs["dataframe"])
self.__clear_preprocess()
def _repr_html_(self) -> str:
from .text._html import HtmlTableWriter
writer = HtmlTableWriter(
table_name=self.table_name,
headers=self.headers,
value_matrix=self.value_matrix,
column_styles=self.column_styles,
colorize_terminal=self.colorize_terminal,
enable_ansi_escape=self.enable_ansi_escape,
)
writer._dp_extractor = self._dp_extractor
return writer.dumps()
def __clear_preprocess_status(self) -> None:
self._is_complete_table_dp_preprocess = False
self._is_complete_table_property_preprocess = False
self._is_complete_header_preprocess = False
self._is_complete_value_matrix_preprocess = False
def __clear_preprocess_data(self) -> None:
self._column_dp_list: List[ColumnDataProperty] = []
self._table_headers: List[str] = []
self._table_value_matrix: List[Union[List[str], Dict]] = []
self._table_value_dp_matrix: Sequence[Sequence[DataProperty]] = []
@property
def headers(self) -> Sequence[str]:
"""Headers of a table to be outputted."""
return self._dp_extractor.headers
@headers.setter
def headers(self, value: Sequence[str]) -> None:
self._dp_extractor.headers = value
@property
def is_formatting_float(self) -> bool:
return self._dp_extractor.is_formatting_float
@is_formatting_float.setter
def is_formatting_float(self, value: bool) -> None:
if self._dp_extractor.is_formatting_float == value:
return
self._dp_extractor.is_formatting_float = value
self.__clear_preprocess()
@property
def max_workers(self) -> int:
return self._dp_extractor.max_workers
@max_workers.setter
def max_workers(self, value: Optional[int]) -> None:
self._dp_extractor.max_workers = value
@property
def tabledata(self) -> TableData:
"""Get tabular data of the writer.
Returns:
tabledata.TableData:
"""
return TableData(
self.table_name,
self.headers,
self.value_matrix,
max_workers=self.max_workers,
max_precision=self._dp_extractor.max_precision,
)
@property
def table_name(self) -> str:
"""Name of a table."""
return self._table_name
@table_name.setter
def table_name(self, value: str) -> None:
self._table_name = value
@property
def type_hints(self) -> List[TypeHint]:
"""
Type hints for each column of the tabular data.
Writers convert the data in each column using the type-hint information
before writing tables when you call ``write_xxx`` methods.
Acceptable values are as follows:
- |None| (automatically detect column type from values in the column)
- :py:class:`pytablewriter.typehint.Bool` or ``"bool"``
- :py:class:`pytablewriter.typehint.DateTime` or ``"datetime"``
- :py:class:`pytablewriter.typehint.Dictionary` or ``"dict"``
- :py:class:`pytablewriter.typehint.Infinity` or ``"inf"``
- :py:class:`pytablewriter.typehint.Integer` or ``"int"``
- :py:class:`pytablewriter.typehint.IpAddress` or ``"ipaddr"``
- :py:class:`pytablewriter.typehint.List` or ``"list"``
- :py:class:`pytablewriter.typehint.Nan` or ``"nan"``
- :py:class:`pytablewriter.typehint.NoneType` or ``"none"``
- :py:class:`pytablewriter.typehint.NullString` or ``"nullstr"``
- :py:class:`pytablewriter.typehint.RealNumber` or ``"realnumber"`` or ``"float"``
- :py:class:`pytablewriter.typehint.String` or ``"str"``
If a type-hint value is not |None|, the writer tries to
convert each value in the column to the type-hint class.
If the type-hint value is |None| or the conversion fails,
the writer automatically detects the column data type from
the column data.
If ``type_hints`` is |None|, the writer detects data types for all
of the columns automatically and writes a table by using detected column types.
Defaults to |None|.
:Examples:
- :ref:`example-type-hint-js`
- :ref:`example-type-hint-python`
"""
return self._dp_extractor.column_type_hints
@type_hints.setter
def type_hints(self, value: Sequence[Union[str, TypeHint]]) -> None:
hints = list(value)
if self.type_hints == hints:
return
self.__set_type_hints(hints)
self.__clear_preprocess()
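# Hedged usage sketch (illustration only, not part of this class): setting
# type hints on a concrete writer before dumping a table. MarkdownTableWriter
# is assumed here as a representative concrete subclass.
#
#     writer = MarkdownTableWriter()
#     writer.headers = ["id", "name"]
#     writer.type_hints = [Integer, String]      # or ["int", "str"]
#     writer.value_matrix = [[1, "foo"], [2, "bar"]]
#     writer.write_table()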
@property
def default_style(self) -> Style:
"""Default |Style| of table cells."""
return self.__default_style
@default_style.setter
def default_style(self, style: Optional[Style]) -> None:
if style is None:
style = Style()
if not isinstance(style, Style):
raise TypeError("default_style must be a Style instance")
try:
if self.__default_style == style:
return
except AttributeError:
# not yet initialized
pass
self.__default_style = style
self._dp_extractor.default_format_flags = _ts_to_flag[
self.__default_style.thousand_separator
]
self.__clear_preprocess()
@property
def column_styles(self) -> List[Optional[Style]]:
"""Output |Style| for each column.
Returns:
list of |Style|:
"""
return self.__col_style_list
@column_styles.setter
def column_styles(self, value: Sequence[Optional[Style]]) -> None:
if self.__col_style_list == value:
return
self.__col_style_list = list(value)
if self.__col_style_list:
self._dp_extractor.format_flags_list = [
_ts_to_flag[self._get_col_style(col_idx).thousand_separator]
for col_idx in range(len(self.__col_style_list))
]
else:
self._dp_extractor.format_flags_list = []
self.__clear_preprocess()
@property
def colorize_terminal(self) -> bool:
return self.__colorize_terminal
@colorize_terminal.setter
def colorize_terminal(self, value: bool) -> None:
if self.__colorize_terminal == value:
return
self.__colorize_terminal = value
self.__clear_preprocess()
@property
def enable_ansi_escape(self) -> bool:
return self.__enable_ansi_escape
@enable_ansi_escape.setter
def enable_ansi_escape(self, value: bool) -> None:
if self.__enable_ansi_escape == value:
return
self.__enable_ansi_escape = value
self.__clear_preprocess()
@property
def _quoting_flags(self) -> Dict[Typecode, bool]:
return self._dp_extractor.quoting_flags
@_quoting_flags.setter
def _quoting_flags(self, value: Mapping[Typecode, bool]) -> None:
self._dp_extractor.quoting_flags = value
self.__clear_preprocess()
def add_style_filter(self, style_filter: StyleFilterFunc) -> None:
"""Add a style filter function to the writer.
Args:
style_filter:
A function that called for each | |
from .BaseStep import BaseStep
from ..data.Posts import Posts
import tomotopy as tp
import pandas as pd
import numpy as np
import json
import csv
import random
import statistics
from collections.abc import Iterable
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
sns.set_style('whitegrid')
import warnings
warnings.filterwarnings("ignore")
class PostProcessing(BaseStep):
def __init__(self):
super().__init__('Post-processing')
self.__modelFile = 'results/model.bin'
self.__model = None
self.__experimentsFile = 'results/experiments.csv'
self.__experiments = None
self.__posts = Posts(preProcessed=True, memory=False, splitted=True)
self.__topicsFile = 'results/topics.csv'
self.__labeledTopicsFile = 'results/labeled-topics.csv'
self.__topicsFields = ['topic', 'label', 'words']
self.__labels = None
self.__generalPopularityFile = 'results/general-popularity.csv'
self.__generalPopularityFields = ['topic', 'semester', 'popularity']
self.__generalDriftFile = 'results/general-drift.csv'
self.__generalDriftFields = ['topic', 'mean', 'variance', 'drift']
self.__generalTrendsFile = 'results/general-trends.csv'
self.__generalTrendsFields = ['topic', 'popularity']
self.__userPopularityFile = 'results/user-popularity.csv'
self.__userPopularityFields = ['user'] + self.__generalPopularityFields
self.__userDriftFile = 'results/user-drift.csv'
self.__userDriftFields = ['user'] + self.__generalDriftFields
self.__userTrendsFile = 'results/user-trends.csv'
self.__userTrendsFields = ['user'] + self.__generalTrendsFields
def __createCSV(self, csvName, fields):
with open(csvName, 'w', newline='') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fields)
writer.writeheader()
def __appendToCSV(self, csvName, data):
with open(csvName, 'a', newline='') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=data.keys())
writer.writerow(data)
def __extractTopics(self):
print(' Extracting topics')
# Create CSV
self.__createCSV(self.__topicsFile, self.__topicsFields)
for topic in range(self.__model.k):
self.__appendToCSV(
self.__topicsFile,
{
'topic': topic,
'label': 'unknown',
'words': ' '.join([ t[0] for t in self.__model.get_topic_words(topic) ]),
}
)
def __loadLabeledTopics(self):
try:
df = pd.read_csv(self.__labeledTopicsFile, header=0)
self.__labels = df.label.tolist()
except Exception:
pass
def __createCoherenceChart(self):
print(' Creating coherence chart')
num_topics_values = self.__experiments['num_topics'].tolist()
iterations_values = self.__experiments['iterations'].tolist()
Z = self.__experiments['coherence'].tolist()
Z_max = max(Z)
index = Z.index(Z_max)
X_max = num_topics_values[index]
Y_max = iterations_values[index]
fig = plt.figure(figsize=(8,5))
ax = fig.gca(projection='3d')
surface = ax.plot_trisurf(num_topics_values, iterations_values, Z, cmap=cm.coolwarm, linewidth=0)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
fig.colorbar(surface, shrink=0.5, aspect=5)
fig.suptitle('Best experiment: iterations={} topics={} coherence={:.4f}'.format(Y_max, X_max, Z_max))
fig.tight_layout()
ax.set_xlabel('Number of topics')
ax.set_ylabel('Iterations')
ax.set_zlabel('Coherence')
plt.savefig('results/Coherence-Chart.png', dpi=600)
plt.clf()
def __normalizeTopics(self, topics):
if len(topics) == 0:
return []
normalizer = 1 / float( sum([ weight for _, weight in topics ]) )
return [ (topic, weight*normalizer) for topic, weight in topics ]
def __getTopics(self, topicDistribution, threshold=0.1):
# Map topics
topics = list(zip(range(len(topicDistribution)), topicDistribution))
topics.sort(key=lambda value: value[1], reverse=True)
# Remove topics below threshold and normalize
return self.__normalizeTopics([ (topic, weight) for topic, weight in topics if weight >= threshold ])
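# Hedged worked example for __getTopics: a distribution [0.05, 0.60, 0.35]
# with the default threshold of 0.1 drops topic 0, keeps topics 1 and 2,
# and renormalizes their weights to 0.60/0.95 ~= 0.632 and 0.35/0.95 ~= 0.368.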
def __initCalculator(self):
return {'count': 0, 'weightSum': [0]*self.__model.k}
def __saveDrift(self, semesterCount, popularities, csvName, user=None):
for topic in popularities.keys():
lengthDifference = semesterCount - len(popularities[topic])
if lengthDifference > 0:
popularities[topic] = [0]*lengthDifference + popularities[topic]
mean = statistics.mean(popularities[topic])
variance = statistics.pvariance(popularities[topic], mu=mean)
drift = variance**0.5
if not user:
self.__appendToCSV(
csvName,
{
'topic': topic,
'mean': mean,
'variance': variance,
'drift': drift,
}
)
else:
self.__appendToCSV(
csvName,
{
'user': user,
'topic': topic,
'mean': mean,
'variance': variance,
'drift': drift,
}
)
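# Hedged worked example for __saveDrift: popularities [0.2, 0.4, 0.6] give
# mean 0.4, population variance 0.08/3 ~= 0.0267, and drift ~= 0.163
# (the population standard deviation).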
def __computeUserPopularity(self):
print(' Computing user popularity')
self.__countEmpty = 0
numPosts = len(self.__model.docs)
calculation = {}
index = -1
for post in self.__posts:
# Stop when reach the last post in the model
if index+1 == numPosts:
break
# Compute the metrics if content is not empty
elif len(post['content']) > 0:
# Get topics
index += 1
content = self.__model.docs[index]
topics = self.__getTopics(content.get_topic_dist())
# Check if post has topics
if len(topics) == 0:
self.__countEmpty += 1
continue
# Get data
user = post['user']
year, month, day = post['date'].split('-')
semester = f'{year}.{1 if int(month) < 7 else 2}'
# Adjust dict of users and semesters
if not user in calculation.keys():
calculation[user] = {semester: self.__initCalculator()}
elif not semester in calculation[user].keys():
calculation[user][semester] = self.__initCalculator()
# Increment posts counter
calculation[user][semester]['count'] += 1
# Sum weight for each topic
for topic, weight in topics:
calculation[user][semester]['weightSum'][topic] += weight
# Print some results
print(' Number of users:', len(calculation.keys()))
print(' Posts with empty topics:', self.__countEmpty)
# Initialize CSVs
self.__createCSV(self.__userPopularityFile, self.__userPopularityFields)
self.__createCSV(self.__userDriftFile, self.__userDriftFields)
self.__createCSV(self.__userTrendsFile, self.__userTrendsFields)
# Finish relative popularity calculation
computedCount = 0
for user in calculation.keys():
popularities = {}
trendPopularityCalculation = self.__initCalculator()
for semester in calculation[user].keys():
trendPopularityCalculation['count'] += calculation[user][semester]['count']
for topic in range(self.__model.k):
# Check if metric must be computed
if calculation[user][semester]['weightSum'][topic] == 0:
if topic in popularities.keys():
popularities[topic].append(0)
continue
# Compute popularities
trendPopularityCalculation['weightSum'][topic] += calculation[user][semester]['weightSum'][topic]
popularity = calculation[user][semester]['weightSum'][topic] / calculation[user][semester]['count']
computedCount += 1
# Append relative popularity to compute variance
if topic not in popularities.keys():
popularities[topic] = []
popularities[topic].append(popularity)
# Insert popularity to csv
self.__appendToCSV(
self.__userPopularityFile,
{
'user': user,
'topic': topic,
'semester': semester,
'popularity': popularity,
}
)
# Insert trend popularity to csv
trendPopularities = [
trendPopularityCalculation['weightSum'][topic] / trendPopularityCalculation['count']
for topic in range(self.__model.k)
]
for topic in range(self.__model.k):
if trendPopularityCalculation['weightSum'][topic] > 0:
computedCount += 1
popularity = trendPopularityCalculation['weightSum'][topic] / trendPopularityCalculation['count']
self.__appendToCSV(
self.__userTrendsFile,
{
'user': user,
'topic': topic,
'popularity': popularity,
}
)
# Compute drift
popularities = { topic: popularities[topic] for topic in sorted(popularities.keys()) }
self.__saveDrift(len(calculation[user].keys()), popularities, self.__userDriftFile, user)
computedCount += 4
print(' Computed metrics:', computedCount)
def __computeGeneralPopularity(self):
print(' Computing general popularity')
self.__countEmpty = 0
numPosts = len(self.__model.docs)
calculation = {}
index = -1
for post in self.__posts:
# Stop when reach the last post in the model
if index+1 == numPosts:
break
# Compute the metrics if content is not empty
elif len(post['content']) > 0:
# Get topics
index += 1
content = self.__model.docs[index]
topics = self.__getTopics(content.get_topic_dist())
# Check if post has topics
if len(topics) == 0:
self.__countEmpty += 1
continue
# Get semester
year, month, day = post['date'].split('-')
semester = f'{year}.{1 if int(month) < 7 else 2}'
# Adjust dict of semesters
if not semester in calculation.keys():
calculation[semester] = self.__initCalculator()
# Increment posts counter
calculation[semester]['count'] += 1
# Sum weight for each topic
for topic, weight in topics:
calculation[semester]['weightSum'][topic] += weight
# Print some results
print(' Posts with empty topics:', self.__countEmpty)
# Initialize CSVs
self.__createCSV(self.__generalPopularityFile, self.__generalPopularityFields)
self.__createCSV(self.__generalDriftFile, self.__generalDriftFields)
self.__createCSV(self.__generalTrendsFile, self.__generalTrendsFields)
# Finish relative popularity calculation
popularities = {}
trendPopularityCalculation = self.__initCalculator()
computedCount = 0
for semester in calculation.keys():
trendPopularityCalculation['count'] += calculation[semester]['count']
for topic in range(self.__model.k):
# Check if metric must be computed
if calculation[semester]['weightSum'][topic] == 0:
if topic in popularities.keys():
popularities[topic].append(0)
continue
# Compute popularities
trendPopularityCalculation['weightSum'][topic] += calculation[semester]['weightSum'][topic]
popularity = calculation[semester]['weightSum'][topic] / calculation[semester]['count']
computedCount += 1
# Append relative popularity to compute variance
if topic not in popularities.keys():
popularities[topic] = []
popularities[topic].append(popularity)
# Insert popularity to csv
self.__appendToCSV(
self.__generalPopularityFile,
{
'topic': topic,
'semester': semester,
'popularity': popularity,
}
)
# Insert trend popularity to csv
trendPopularities = [
trendPopularityCalculation['weightSum'][topic] / trendPopularityCalculation['count']
for topic in range(self.__model.k)
]
for topic in range(self.__model.k):
if trendPopularityCalculation['weightSum'][topic] > 0:
computedCount += 1
popularity = trendPopularityCalculation['weightSum'][topic] / trendPopularityCalculation['count']
self.__appendToCSV(
self.__generalTrendsFile,
{
'topic': topic,
'popularity': popularity,
}
)
# Compute drift
popularities = { topic: popularities[topic] for topic in sorted(popularities.keys()) }
self.__saveDrift(len(calculation.keys()), popularities, self.__generalDriftFile)
computedCount += 4
print(' Computed metrics:', computedCount)
def __saveChart(self, yLabel, xTicks, path, legends=True):
if isinstance(yLabel, str): plt.ylabel(yLabel)
if legends: plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=4, fontsize='small', borderaxespad=0, labelspacing=0.8)
if isinstance(xTicks, Iterable): plt.xticks(xTicks, rotation=45, ha='left')
plt.tight_layout()
plt.savefig(path, dpi=600)
plt.clf()
def __createUserCharts(self):
print(' Creating user popularity charts')
originalPopularityDf = pd.read_csv(self.__userPopularityFile, header=0)
originaldriftDf = pd.read_csv(self.__userDriftFile, header=0)
originalTrendsDf = pd.read_csv(self.__userTrendsFile, header=0)
users = originaldriftDf.user.unique()
count = 0
random.seed(0)
random.shuffle(users)
for user in users:
# Stop after 10 users have been analyzed
if count == 10:
break
# Skip the user if their contribution spans fewer than 12 semesters
popularityDf = originalPopularityDf.loc[originalPopularityDf.user == user]
if len(popularityDf.semester.unique()) < 12:
continue
count += 1
driftDf = originaldriftDf.loc[originaldriftDf.user == user]
trendsDf = originalTrendsDf.loc[originalTrendsDf.user == user]
semesters = popularityDf.semester.unique()
popularitiesByMonth = []
drifts = []
trendPopularities = []
topics = []
for topic in range(int(self.__experiment.num_topics)):
popularities = []
for i in range(len(semesters)):
semester = semesters[i]
popularitiesRows = popularityDf.loc[(popularityDf.semester == semester) & (popularityDf.topic == topic)]
if len(popularitiesRows) == 0:
popularities.append(0)
else:
popularities.append(popularitiesRows.iloc[-1].popularity)
if any(value != 0 for value in popularities):
popularitiesByMonth.append(popularities)
topics.append(topic)
if topic in topics:
driftRows = driftDf.loc[driftDf.topic == topic]
drifts.append(driftRows.iloc[-1].drift)
trendsRows = trendsDf.loc[trendsDf.topic == topic]
trendPopularities.append(trendsRows.iloc[-1].popularity)
# Create palette
palette = sns.color_palette('muted', 10) + sns.color_palette('colorblind', 10) + sns.color_palette('dark', 10)
# Load topic labels if possible
labels = [ self.__labels[i] for i in topics ] if isinstance(self.__labels, list) else topics
enum = [x for x,
ensure_trailing_sep(start, sep)
if path.startswith(start_with_sep):
return path[len(start_with_sep) :]
raise ValueError(path, start)
def which(cmd):
which_cmd = "where" if get_platform() == "Windows" else "which"
devnull = open(os.devnull, "w")
try:
out = subprocess.check_output([which_cmd, cmd], stderr=devnull)
except subprocess.CalledProcessError:
return None
else:
assert out, cmd
return out.decode("utf-8").split(os.linesep)[0]
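# Usage sketch (result path is illustrative): which("git") returns the first
# path printed by the platform lookup command, e.g. "/usr/bin/git" on Linux,
# or None when the command is not on PATH.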
def symlink(target, link):
if get_platform() == "Windows":
_windows_symlink(target, link)
else:
os.symlink(target, link)
def copyfile(src, dest):
shutil.copyfile(src, dest)
shutil.copymode(src, dest)
def _windows_symlink(target, link):
if os.path.isdir(target):
args = ["mklink", "/D", link, target]
else:
args = ["mklink", link, target]
try:
subprocess.check_output(args, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
err_msg = e.output.decode(errors="ignore").strip()
_maybe_symlink_error(err_msg, e.returncode)
raise OSError(e.returncode, err_msg)
def _maybe_symlink_error(err_msg, err_code):
if "You do not have sufficient privilege to perform this operation" in err_msg:
raise SystemExit(
"You do not have sufficient privilege to perform this operation\n\n"
"For help, see "
"https://my.guild.ai/docs/windows#symbolic-links-privileges-in-windows",
err_code,
)
_text_ext = set(
[
".csv",
".md",
".py",
".sh",
".txt",
]
)
_binary_ext = set(
[
".ai",
".bmp",
".gif",
".ico",
".jpeg",
".jpg",
".png",
".ps",
".psd",
".svg",
".tif",
".tiff",
".aif",
".mid",
".midi",
".mpa",
".mp3",
".ogg",
".wav",
".wma",
".avi",
".mov",
".mp4",
".mpeg",
".swf",
".wmv",
".7z",
".deb",
".gz",
".pkg",
".rar",
".rpm",
".tar",
".xz",
".z",
".zip",
".doc",
".docx",
".key",
".pdf",
".ppt",
".pptx",
".xlr",
".xls",
".xlsx",
".bin",
".pickle",
".pkl",
".pyc",
]
)
_control_chars = b'\n\r\t\f\b'
if bytes is str:
_printable_ascii = _control_chars + b"".join([chr(x) for x in range(32, 127)])
_printable_high_ascii = b"".join([chr(x) for x in range(127, 256)])
else:
_printable_ascii = _control_chars + bytes(range(32, 127))
_printable_high_ascii = bytes(range(127, 256))
def is_text_file(path, ignore_ext=False):
import chardet
# Adapted from https://github.com/audreyr/binaryornot under the
# BSD 3-clause License
if not os.path.exists(path):
raise OSError("%s does not exist" % path)
if not os.path.isfile(path):
return False
if not ignore_ext:
ext = os.path.splitext(path)[1].lower()
if ext in _text_ext:
return True
if ext in _binary_ext:
return False
try:
with open(path, 'rb') as f:
sample = f.read(1024)
except IOError:
return False
if not sample:
return True
low_chars = sample.translate(None, _printable_ascii)
nontext_ratio1 = float(len(low_chars)) / float(len(sample))
high_chars = sample.translate(None, _printable_high_ascii)
nontext_ratio2 = float(len(high_chars)) / float(len(sample))
likely_binary = (nontext_ratio1 > 0.3 and nontext_ratio2 < 0.05) or (
nontext_ratio1 > 0.8 and nontext_ratio2 > 0.8
)
detected_encoding = chardet.detect(sample)
decodable_as_unicode = False
if (
detected_encoding["confidence"] > 0.9
and detected_encoding["encoding"] != "ascii"
):
try:
try:
sample.decode(encoding=detected_encoding["encoding"])
except TypeError:
# pylint: disable=undefined-variable
unicode(sample, encoding=detected_encoding["encoding"])
decodable_as_unicode = True
except LookupError:
pass
except UnicodeDecodeError:
pass
if likely_binary:
return decodable_as_unicode
else:
if decodable_as_unicode:
return True
else:
if b'\x00' in sample or b'\xff' in sample:
return False
return True
def safe_is_text_file(path, ignore_ext=False):
try:
return is_text_file(path, ignore_ext)
except OSError as e:
log.warning("could not check for text file %s: %s", path, e)
return False
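# Usage sketch (the file name below is hypothetical):
#
#   if safe_is_text_file("run/output.log"):
#       with open("run/output.log", errors="replace") as f:
#           preview = f.read(200)
#
# Binary files, unreadable paths and detection errors all fall through to
# False here.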
def touch(filename):
open(filename, "ab").close()
now = time.time()
os.utime(filename, (now, now))
def ensure_file(filename):
if not os.path.exists(filename):
touch(filename)
def getmtime(filename):
try:
return os.path.getmtime(filename)
except OSError:
return None
def kill_process_tree(pid, force=False, timeout=None, child_term_timeout=None):
import psutil
import signal
if force:
sig = signal.SIGKILL
else:
sig = signal.SIGTERM
root = psutil.Process(pid)
children = root.children(recursive=True)
all_procs = [root] + children
_safe_send_signal(root, sig)
if child_term_timeout is not None:
psutil.wait_procs(children, timeout=child_term_timeout)
for child in children:
_safe_send_signal(child, sig)
return psutil.wait_procs(all_procs, timeout=timeout)
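# Note: the return value is whatever psutil.wait_procs gives back for the root
# process and its children, i.e. a (gone, alive) pair, so a caller can
# escalate if needed (sketch):
#
#   gone, alive = kill_process_tree(pid, timeout=30)
#   if alive:
#       kill_process_tree(pid, force=True)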
def _safe_send_signal(proc, sig):
import psutil
try:
proc.send_signal(sig)
except psutil.NoSuchProcess:
pass
def safe_filesize(path):
try:
return os.path.getsize(path)
except OSError:
return None
def safe_mtime(path):
try:
return os.path.getmtime(path)
except OSError:
return None
def safe_list_remove(x, l):
safe_list_remove_all([x], l)
def safe_list_remove_all(xs, l):
for x in xs:
try:
l.remove(x)
except ValueError:
pass
def local_server_url(host, port):
import socket
if not host or host == "0.0.0.0":
host = socket.gethostname()
try:
# Verify that configured hostname is valid
socket.gethostbyname(host)
except socket.gaierror:
host = "localhost"
return "http://{}:{}".format(host, port)
def format_duration(start_time, end_time=None):
if start_time is None:
return None
if end_time is None:
end_time = time.time() * 1000000
seconds = (end_time - start_time) // 1000000
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def format_dir(dir):
return format_user_dir(os.path.abspath(dir))
def format_user_dir(s):
if get_platform() == "Windows":
return s
user_dir = os.path.expanduser("~")
if s.startswith(user_dir):
return os.path.join("~", s[len(user_dir) + 1 :])
return s
def apply_env(target, source, names):
for name in names:
try:
target[name] = source[name]
except KeyError:
pass
def safe_filename(s):
if get_platform() == "Windows":
s = re.sub(r"[:<>?]", "_", s)
return re.sub(r"[/\\]+", "_", s)
def wait_forever(sleep_interval=0.1):
while True:
time.sleep(sleep_interval)
class RunOutputReader(object):
def __init__(self, run_dir):
self.run_dir = run_dir
self._lines = []
self._output = None
self._index = None
def read(self, start=0, end=None):
"""Read run output from start to end.
Both start and end are zero-based indexes to run output lines
and are both inclusive. Note this is different from the Python
slice function where end is exclusive.
"""
self._read_next(end)
if end is None:
slice_end = None
else:
slice_end = end + 1
return self._lines[start:slice_end]
def _read_next(self, end):
if end is not None and end < len(self._lines):
return
try:
output, index = self._ensure_open()
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
lines = self._lines
while True:
line = output.readline().rstrip().decode()
if not line:
break
header = index.read(9)
if len(header) < 9:
break
time, stream = struct.unpack("!QB", header)
lines.append((time, stream, line))
if end is not None and end < len(self._lines):
break
def _ensure_open(self):
if self._output is None:
guild_path = os.path.join(self.run_dir, ".guild")
output = open(os.path.join(guild_path, "output"), "rb")
index = open(os.path.join(guild_path, "output.index"), "rb")
self._output, self._index = output, index
assert self._output is not None
assert self._index is not None
return self._output, self._index
def close(self):
self._try_close(self._output)
self._try_close(self._index)
@staticmethod
def _try_close(f):
if f is None:
return
try:
f.close()
except IOError:
pass
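# Usage sketch (the run directory is hypothetical); note that both indexes are
# inclusive, so read(0, 4) returns up to five (time, stream, line) tuples:
#
#   reader = RunOutputReader("runs/abc123")
#   try:
#       for ts, stream, line in reader.read(0, 4):
#           print(ts, stream, line)
#   finally:
#       reader.close()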
def gpu_available():
import ctypes
if "linux" in sys.platform:
lib = "libcublas.so"
elif sys.platform == "darwin":
lib = "libcublas.dylib"
elif sys.platform == "win32":
lib = "cublas.dll"
else:
log.warning("unable to detect GPU for platform '%s'", sys.platform)
lib = None
if lib:
log.debug("checking for GPU by loading %s", lib)
try:
ctypes.CDLL(lib)
except OSError as e:
log.debug("error loading '%s': %s", lib, e)
else:
log.debug("%s loaded", lib)
return True
return False
def get_env(name, type, default=None):
try:
val = os.environ[name]
except KeyError:
return default
else:
try:
return type(val)
except Exception as e:
log.warning("error converting env %s to %s: %s", name, type, e)
return None
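# Usage sketch (the variable name is hypothetical): get_env("GUILD_TIMEOUT", int, 60)
# returns 60 when the variable is unset, the converted value when it parses,
# and None (with a warning) when conversion fails.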
def del_env(names):
for name in names:
try:
del os.environ[name]
except KeyError:
pass
def python_interpreters():
import glob
from guild import config
bin_dir = os.path.dirname(config.python_exe())
ret = []
for path in glob.glob(os.path.join(bin_dir, "python*")):
m = re.match(r"python([0-9\.]+)$", os.path.basename(path))
if m:
ret.append((path, m.group(1)))
return ret
def find_python_interpreter(version_spec):
import pkg_resources
try:
# Requirement.parse wants a package name, so we use 'python'
# here, but anything would do.
req = pkg_resources.Requirement.parse("python%s" % version_spec)
except pkg_resources.RequirementParseError:
raise ValueError(version_spec)
python_interps = {ver: path for path, ver in python_interpreters()}
matching = list(req.specifier.filter(sorted(python_interps)))
if not matching:
return None
matching_ver = matching[0]
return python_interps[matching_ver], matching_ver
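# Hedged example: with python3.8 and python3.9 present in the same bin
# directory as the configured Python, find_python_interpreter(">=3.8") would
# return something like ("/usr/bin/python3.8", "3.8"); it returns None when no
# discovered version satisfies the spec.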
def is_executable_file(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def copytree(src, dest, preserve_links=True):
from distutils import dir_util
dir_util.copy_tree(src, dest, preserve_symlinks=preserve_links)
def select_copytree(src, dest, config, copy_filter=None):
if not isinstance(config, list):
raise ValueError("invalid config: expected list got %r" % config)
log.debug("copying files from %s to %s", src, dest)
to_copy = _select_files_to_copy(src, config, copy_filter)
if not to_copy:
log.debug("no files to copy")
return
for file_src, file_src_rel_path in to_copy:
file_dest = os.path.join(dest, file_src_rel_path)
log.debug("copying file %s to %s", file_src, file_dest)
ensure_dir(os.path.dirname(file_dest))
_try_copy_file(file_src, file_dest)
def _select_files_to_copy(src_dir, config, copy_filter):
to_copy = []
seen_dirs = set()
log.debug("generating file list from %s", src_dir)
for root, dirs, files in os.walk(src_dir, followlinks=True):
seen_dirs.add(realpath(root))
_del_excluded_select_copy_dirs(
dirs, src_dir, root, seen_dirs, config, copy_filter
)
for name in files:
path = os.path.join(root, name)
if not os.path.isfile(path):
continue
rel_path = os.path.relpath(path, src_dir)
log.debug("considering file to copy %s", path)
if _select_to_copy(path, rel_path, config, copy_filter):
log.debug("seleted file to copy %s", path)
to_copy.append((path, rel_path))
# Sort before notifying copy_filter to have deterministic result.
to_copy.sort()
if copy_filter:
copy_filter.pre_copy(to_copy)
return to_copy
def _del_excluded_select_copy_dirs(dirs, src_dir, root, seen_dirs, config, copy_filter):
_del_seen_dirs(dirs, root, seen_dirs)
_del_config_excluded_dirs(dirs, src_dir, root, config)
if copy_filter:
copy_filter.delete_excluded_dirs(root, dirs)
def _del_seen_dirs(dirs, root, seen):
for dir_name in list(dirs):
real_path = realpath(os.path.join(root, dir_name))
if real_path in seen:
dirs.remove(dir_name)
def _del_config_excluded_dirs(dirs, src_dir, root, config):
for name in list(dirs):
path = os.path.join(root, name)
rel_path = os.path.relpath(path, src_dir)
if not _select_to_copy(path, rel_path, config):
dirs.remove(name)
def _select_to_copy(path, rel_path, config, copy_filter=None):
assert isinstance(config, list)
last_match = None
for config_item in config:
for spec in config_item.specs:
if _select_file_match(rel_path, spec):
last_match = spec
if last_match:
return _select_to_copy_for_spec(last_match)
if copy_filter:
return copy_filter.default_select_path(path)
return True
def _select_file_match(rel_path, spec):
return any((fnmatch.fnmatch(rel_path, p) for p in spec.patterns))
def _select_to_copy_for_spec(spec):
return spec.type == "include"
def _try_copy_file(src, dest):
try:
shutil.copyfile(src, dest)
except (IOError, OSError) as e:
"oso goiz"],
"arratsaldea": ["arratsa", "bazkalostea", "arratsalde", "arrats"],
"gaua": ["iluntzea", "berandu", "gau", "gaba"]}
for syn in synonyms:
for word in synonyms[syn]:
s = s.replace(" " + word + " ", " " + syn + " ")
# relevant plurals
wordlist = ["goizak", "arratsaldeak", "gauak", "egunak", "asteak",
"urteak", "minutuak", "segunduak", "hurrengoak",
"datozenak", "orduak", "hilabeteak"]
for _, word in enumerate(wordlist):
s = s.replace(word, word.rstrip('ak'))
# s = s.replace("meses", "mes").replace("anteriores", "anterior")
return s
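# Illustrative effect of the normalization above (hypothetical input):
# " datorren iluntzea " becomes " datorren gaua " via the synonym map, and
# plural forms such as "egunak" or "asteak" are reduced to their singular
# stems "egun" and "aste" before parsing.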
def date_found():
return found or \
(
datestr != "" or
yearOffset != 0 or monthOffset != 0 or
dayOffset is True or hrOffset != 0 or
hrAbs or minOffset != 0 or
minAbs or secOffset != 0
)
if input_str == "":
return None
if anchorDate is None:
anchorDate = datetime.now()
found = False
daySpecified = False
dayOffset = False
monthOffset = 0
yearOffset = 0
dateNow = anchorDate
today = dateNow.strftime("%w")
currentYear = dateNow.strftime("%Y")
fromFlag = False
datestr = ""
hasYear = False
timeQualifier = ""
words = clean_string(input_str).split(" ")
timeQualifiersList = ['goiza', 'arratsaldea', 'gaua']
time_indicators = ["en", "la", "al", "por", "pasados",
"pasadas", "día", "hora"]
days = ['astelehena', 'asteartea', 'asteazkena',
'osteguna', 'ostirala', 'larunbata', 'igandea']
months = ['urtarrila', 'otsaila', 'martxoa', 'apirila', 'maiatza', 'ekaina',
'uztaila', 'abuztua', 'iraila', 'urria', 'azaroa',
'abendua']
monthsShort = ['urt', 'ots', 'mar', 'api', 'mai', 'eka', 'uzt', 'abu',
'ira', 'urr', 'aza', 'abe']
nexts = ["hurrengo", "datorren", "ondorengo"]
suffix_nexts = ["barru"]
lasts = ["azken", "duela"]
suffix_lasts = ["aurreko"]
nxts = ["ondorengo", "hurrengo", "datorren"]
prevs = ["aurreko", "duela", "previo", "anterior"]
# TODO
froms = ["desde", "en", "para", "después de", "por", "próximo",
"próxima", "de"]
thises = ["hau"]
froms += thises
lists = nxts + prevs + froms + time_indicators
for idx, word in enumerate(words):
if word == "":
continue
wordPrevPrev = words[idx - 2] if idx > 1 else ""
wordPrev = words[idx - 1] if idx > 0 else ""
wordNext = words[idx + 1] if idx + 1 < len(words) else ""
wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""
start = idx
used = 0
# save timequalifier for later
if word in timeQualifiersList:
timeQualifier = word
# parse today, tomorrow, yesterday
elif (word == "gaur" or word == "gaurko") and not fromFlag:
dayOffset = 0
used += 1
elif (word == "bihar" or word == "biharko") and not fromFlag:
dayOffset = 1
used += 1
elif (word == "atzo" or word == "atzoko") and not fromFlag:
dayOffset -= 1
used += 1
# before yesterday
elif (word == "herenegun" or word == "herenegungo") and not fromFlag:
dayOffset -= 2
used += 1
# if wordNext == "ayer":
# used += 1
# elif word == "ante" and wordNext == "ante" and wordNextNext == \
# "ayer" and not fromFlag:
# dayOffset -= 3
# used += 3
# elif word == "ante anteayer" and not fromFlag:
# dayOffset -= 3
# used += 1
# day after tomorrow
elif (word == "etzi" or word == "etziko") and not fromFlag:
dayOffset += 2
used = 1
elif (word == "etzidamu" or word == "etzidamuko") and not fromFlag:
dayOffset += 3
used = 1
# parse 5 days, 10 weeks, last week, next week, week after
elif word == "egun" or word == "eguna" or word == "eguneko":
if wordPrevPrev and wordPrevPrev == "duela":
used += 1
if wordPrev and wordPrev[0].isdigit():
dayOffset -= int(wordPrev)
start -= 1
used += 1
elif (wordPrev and wordPrev[0].isdigit() and
wordNext not in months and
wordNext not in monthsShort):
dayOffset += int(wordPrev)
start -= 1
used += 2
elif wordNext and wordNext[0].isdigit() and wordNextNext not in \
months and wordNextNext not in monthsShort:
dayOffset += int(wordNext)
start -= 1
used += 2
elif word == "aste" or word == "astea" or word == "asteko" and not fromFlag:
if wordPrev[0].isdigit():
dayOffset += int(wordPrev) * 7
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
dayOffset = 7
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
dayOffset = -7
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
dayOffset = 7
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
dayOffset = -7
start -= 1
used = 2
# parse 10 months, next month, last month
elif word == "hilabete" or word == "hilabetea" or word == "hilabeteko" and not fromFlag:
if wordPrev[0].isdigit():
monthOffset = int(wordPrev)
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
monthOffset = 1
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
monthOffset = -1
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
monthOffset = 1
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
monthOffset = -1
start -= 1
used = 2
# parse 5 years, next year, last year
elif word == "urte" or word == "urtea" or word == "urteko" and not fromFlag:
if wordPrev[0].isdigit():
yearOffset = int(wordPrev)
start -= 1
used = 2
for w in nexts:
if wordPrev == w:
yearOffset = 1
start -= 1
used = 2
for w in lasts:
if wordPrev == w:
yearOffset = -1
start -= 1
used = 2
for w in suffix_nexts:
if wordNext == w:
yearOffset = 1
start -= 1
used = 2
for w in suffix_lasts:
if wordNext == w:
yearOffset = -1
start -= 1
used = 2
# parse Monday, Tuesday, etc., and next Monday,
# last Tuesday, etc.
elif word in days and not fromFlag:
d = days.index(word)
dayOffset = (d + 1) - int(today)
used = 1
if dayOffset < 0:
dayOffset += 7
if wordPrev == "hurrengo":
dayOffset += 7
used += 1
start -= 1
elif wordPrev == "aurreko":
dayOffset -= 7
used += 1
start -= 1
if wordNext == "hurrengo":
# dayOffset += 7
used += 1
elif wordNext == "aurreko":
# dayOffset -= 7
used += 1
# parse 15 of July, June 20th, Feb 18, 19 of February
elif word in months or word in monthsShort:
try:
m = months.index(word)
except ValueError:
m = monthsShort.index(word)
used += 1
datestr = months[m]
if wordPrev and wordPrev[0].isdigit():
# 13 mayo
datestr += " " + wordPrev
start -= 1
used += 1
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNext and wordNext[0].isdigit():
# mayo 13
datestr += " " + wordNext
used += 1
if wordNextNext and wordNextNext[0].isdigit():
datestr += " " + wordNextNext
used += 1
hasYear = True
else:
hasYear = False
elif wordPrevPrev and wordPrevPrev[0].isdigit():
# 13 dia mayo
datestr += " " + wordPrevPrev
start -= 2
used += 2
if wordNext and wordNext[0].isdigit():
datestr += " " + wordNext
used += 1
hasYear = True
else:
hasYear = False
elif wordNextNext and wordNextNext[0].isdigit():
# mayo dia 13
datestr += " " + wordNextNext
used += 2
if wordNextNextNext and wordNextNextNext[0].isdigit():
datestr += " " + wordNextNextNext
used += 1
hasYear = True
else:
hasYear = False
if datestr in months:
datestr = ""
# parse 5 days from tomorrow, 10 weeks from next thursday,
# 2 months from July
validFollowups = days + months + monthsShort
validFollowups.append("gaur")
validFollowups.append("bihar")
validFollowups.append("atzo")
# validFollowups.append("atzoko")
validFollowups.append("herenegun")
validFollowups.append("orain")
validFollowups.append("oraintxe")
# validFollowups.append("ante")
# TODO
if word in froms and wordNext in validFollowups:
if not (word == "bihar" or word == "herenegun" or word == "atzo"):
used = 1
fromFlag = True
if wordNext == "bihar":
dayOffset += 1
elif wordNext == "atzo" or wordNext == "atzoko":
dayOffset -= 1
elif wordNext ==
"""
Unit tests for EDD's REST API.
Note that tests here purposefully hard-code simple object serialization that's
also coded separately in EDD's REST API. This should help to detect when REST
API code changes in EDD accidentally affect client code.
"""
import codecs
import csv
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from requests import codes
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from threadlocals.threadlocals import set_thread_variable
from edd.profile.factory import GroupFactory, UserFactory
from main import models
from main.tests import factory
logger = logging.getLogger(__name__)
def load_permissions(model, *codenames):
ct = ContentType.objects.get_for_model(model)
return list(Permission.objects.filter(content_type=ct, codename__in=codenames))
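# Example: load_permissions(models.Study, "add_study", "change_study") returns
# the matching django.contrib.auth Permission rows for the Study content type;
# the tests below attach them with user.user_permissions.add(*permissions).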
class EddApiTestCaseMixin:
"""
Provides helper methods that improve test error messages and simplify repetitive test code.
Helper methods also enforce consistency in return codes across EDD's REST API.
"""
@classmethod
def setUpClass(cls):
"""
Overrides the default Django TestCase to clear out the threadlocal request variable during
class setUp and tearDown.
"""
super().setUpClass()
set_thread_variable("request", None)
@classmethod
def tearDownClass(cls):
"""
Overrides the default Django TestCase to clear out the threadlocal request variable during
class setUp and tearDown.
"""
super().tearDownClass()
set_thread_variable("request", None)
def setUp(self):
"""
Overrides the default Django TestCase to clear out the threadlocal request variable during
test setUp and tearDown.
"""
super().setUp()
set_thread_variable("request", None)
def tearDown(self):
"""
Overrides the default Django TestCase to clear out the threadlocal request variable during
test setUp and tearDown.
"""
super().tearDown()
set_thread_variable("request", None)
def _check_status(self, response, expected_code):
wsgi = response.wsgi_request
self.assertEqual(
response.status_code,
expected_code,
f"Received {response.status_code} instead of {expected_code} for "
f"{wsgi.method} {wsgi.path} for user {wsgi.user}. "
f"Response: {response.content}",
)
return response
class StudiesTests(EddApiTestCaseMixin, APITestCase):
"""
Tests access controls and HTTP return codes for queries to REST API
resources related to studies.
Studies should only be accessible by:
1) Superusers
2) Users who have explicit class-level mutator permissions on Studies via a
django.contrib.auth permission. Any user with a class-level mutator
permission has implied read permission on the study.
3) Users who have explicit StudyPermission granted via their individual
account or via user group membership.
"""
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.read_only_group = GroupFactory()
cls.write_only_group = GroupFactory()
cls.superuser = UserFactory(is_superuser=True)
cls.unprivileged_user = UserFactory()
cls.readonly_user = UserFactory()
cls.write_user = UserFactory()
cls.group_readonly_user = UserFactory()
cls.group_readonly_user.groups.add(cls.read_only_group)
cls.group_write_user = UserFactory()
cls.group_write_user.groups.add(cls.write_only_group)
cls.staff_user = UserFactory(is_staff=True)
cls.staff_user.user_permissions.add(
*load_permissions(models.Study, "add_study", "change_study", "delete_study")
)
cls.study = factory.StudyFactory()
cls.study.userpermission_set.create(
user=cls.readonly_user, permission_type=models.StudyPermission.READ
)
cls.study.userpermission_set.create(
user=cls.write_user, permission_type=models.StudyPermission.WRITE
)
cls.study.grouppermission_set.create(
group=cls.read_only_group, permission_type=models.StudyPermission.READ
)
cls.study.grouppermission_set.create(
group=cls.write_only_group, permission_type=models.StudyPermission.WRITE
)
def test_study_get_with_anonymous(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.logout()
self._check_status(self.client.get(url), status.HTTP_403_FORBIDDEN)
def test_study_get_with_unprivleged(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.unprivileged_user)
self._check_status(self.client.get(url), status.HTTP_404_NOT_FOUND)
def test_study_get_with_readonly(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.readonly_user)
self._check_status(self.client.get(url), status.HTTP_200_OK)
def test_study_get_with_superuser(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.superuser)
self._check_status(self.client.get(url), status.HTTP_200_OK)
def test_study_get_using_uuid(self):
url = reverse("rest:studies-detail", args=[self.study.uuid])
self.client.force_login(self.superuser)
self._check_status(self.client.get(url), status.HTTP_200_OK)
def test_study_delete_with_anonymous(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.logout()
self._check_status(self.client.delete(url), status.HTTP_403_FORBIDDEN)
def test_study_delete_with_unprivleged(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.unprivileged_user)
self._check_status(self.client.delete(url), status.HTTP_405_METHOD_NOT_ALLOWED)
def test_study_delete_with_staff(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.staff_user)
self._check_status(self.client.delete(url), status.HTTP_405_METHOD_NOT_ALLOWED)
def test_study_delete_with_superuser(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.superuser)
self._check_status(self.client.delete(url), status.HTTP_405_METHOD_NOT_ALLOWED)
def _post_payload_new_study(self):
return {
"name": "new study 1",
"description": "description goes here",
"contact_id": self.write_user.pk,
}
def test_study_add_with_anonymous(self):
url = reverse("rest:studies-list")
self.client.logout()
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_add_only_superuser_setting_off(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE=False):
# with normal settings, verify all users can create studies,
# regardless of privileges
self.client.force_login(self.unprivileged_user)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_201_CREATED,
)
def test_study_add_without_contact(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE=False):
self.client.force_login(self.unprivileged_user)
self._check_status(
self.client.post(url, {"name": "contactless study", "description": ""}),
status.HTTP_400_BAD_REQUEST,
)
def test_study_add_with_unprivledged_only_superuser_setting_on(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE=True):
self.client.force_login(self.unprivileged_user)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_add_with_staff_only_superuser_setting_on(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE=True):
# staff with main.add_study cannot create with this setting
self.client.force_login(self.staff_user)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_add_with_superuser_only_superuser_setting_on(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE=True):
# verify that an administrator can create a study
self.client.force_login(self.superuser)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_201_CREATED,
)
def test_study_add_with_unprivledged_only_superuser_setting_permission(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE="permission"):
self.client.force_login(self.unprivileged_user)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_add_with_staff_only_superuser_setting_permission(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE="permission"):
# staff with main.add_study can create with this setting
self.client.force_login(self.staff_user)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_201_CREATED,
)
def test_study_add_with_superuser_only_superuser_setting_permission(self):
url = reverse("rest:studies-list")
with self.settings(EDD_ONLY_SUPERUSER_CREATE="permission"):
# verify that an administrator can create a study
self.client.force_login(self.superuser)
self._check_status(
self.client.post(url, self._post_payload_new_study()),
status.HTTP_201_CREATED,
)
def _put_payload_change_study(self):
return {"name": "Test study", "description": "Description goes here"}
def _put_payload_change_study_contact(self):
return {
"name": "Updated study name",
"description": "Updated study description",
"contact_id": self.write_user.pk,
}
def test_study_change_with_anonymous(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.logout()
self._check_status(
self.client.put(url, self._put_payload_change_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_change_with_unprivledged(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.unprivileged_user)
self._check_status(
self.client.put(url, self._put_payload_change_study()),
status.HTTP_404_NOT_FOUND,
)
def test_study_change_with_readonly(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.readonly_user)
self._check_status(
self.client.put(url, self._put_payload_change_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_change_with_write(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.write_user)
self._check_status(
self.client.put(url, self._put_payload_change_study_contact()),
status.HTTP_200_OK,
)
def test_study_change_with_readonly_group(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.group_readonly_user)
self._check_status(
self.client.put(url, self._put_payload_change_study()),
status.HTTP_403_FORBIDDEN,
)
def test_study_change_with_write_group(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.group_write_user)
self._check_status(
self.client.put(url, self._put_payload_change_study_contact()),
status.HTTP_200_OK,
)
def test_study_change_with_staff(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
self.client.force_login(self.staff_user)
self._check_status(
self.client.put(url, self._put_payload_change_study_contact()),
status.HTTP_404_NOT_FOUND,
)
def test_study_change_with_superuser(self):
url = reverse("rest:studies-detail", args=[self.study.pk])
# verify that an administrator can update
self.client.force_login(self.superuser)
self._check_status(
self.client.put(url, self._put_payload_change_study_contact()),
status.HTTP_200_OK,
)
def test_study_list_read_access_anonymous(self):
url = reverse("rest:studies-list")
self.client.logout()
self._check_status(self.client.get(url), status.HTTP_403_FORBIDDEN)
def test_study_list_read_access_unprivledged(self):
url = reverse("rest:studies-list")
self.client.force_login(self.unprivileged_user)
self._check_status(self.client.get(url), status.HTTP_200_OK)
class ExportTests(EddApiTestCaseMixin, APITestCase):
"""
Tests for expected outputs from /rest/export/ and /rest/stream-export/,
with additional tests for basic functioning of /rest/assays,
/rest/measurements, and /rest/values, since the data is already set up
for handling the export.
"""
@classmethod
def setUpTestData(cls):
super().setUpTestData()
User = get_user_model()
cls.admin = User.objects.get(username="system")
# create study and line with 30 assays
# each assay with one measurement having one value
cls.study = factory.StudyFactory()
cls.line = factory.LineFactory(study=cls.study)
for _i in range(30):
assay = factory.AssayFactory(line=cls.line)
measurement = factory.MeasurementFactory(assay=assay)
factory.ValueFactory(measurement=measurement)
def _assert_row_is_header_row(self, row):
# TODO update based on output config?
self.assertListEqual(
row,
[
"Study ID",
"Study Name",
"Line ID",
"Replicate Key",
"Line Name",
"Line Description",
"Protocol",
"Assay ID",
"Assay Name",
"Formal Type",
"Measurement Type",
"Compartment",
"Units",
"Value",
"Hours",
],
)
def _read_normal_response(self, response):
# read the CSV output
reader = csv.reader(codecs.iterdecode(response.content.split(b"\n"), "utf8"))
# return as a list
return list(reader)
def _read_streaming_response(self, response):
# read the CSV output from streaming_content (not response.content)
reader = csv.reader(codecs.iterdecode(response.streaming_content, "utf8"))
# return as a list
return list(reader)
def _setup_study_with_data_for_export(self):
# create study and line with 30 assays
# each assay with one measurement having one value
self.study = factory.StudyFactory()
self.line = factory.LineFactory(study=self.study)
for _i in range(30):
assay = factory.AssayFactory(line=self.line)
measurement = factory.MeasurementFactory(assay=assay)
factory.ValueFactory(measurement=measurement)
def test_export_login_required(self):
url = reverse("rest:export-list")
response = self.client.get(url, {"line_id": 8})
self.assertEqual(response.status_code, codes.forbidden)
def test_export_as_normal_user(self):
url = reverse("rest:export-list")
readonly_user = UserFactory()
self.study.userpermission_set.create(
user=readonly_user, permission_type=models.StudyPermission.READ
)
self.client.force_authenticate(user=readonly_user)
# request using slug instead of ID
response = self.client.get(url, {"in_study": self.study.slug})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
def test_export_as_normal_user_with_no_bound_data(self):
url = reverse("rest:export-list")
readonly_user = UserFactory()
self.study.userpermission_set.create(
user=readonly_user, permission_type=models.StudyPermission.READ
)
self.client.force_authenticate(user=readonly_user)
# request without any filter parameters
response = self.client.get(url)
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
def test_export_using_in_study_slug(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# request using slug instead of ID
response = self.client.get(url, {"in_study": self.study.slug})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
def test_export_using_in_study_pk(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# request using primary key instead of slug
response = self.client.get(url, {"in_study": self.study.pk})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
def test_export_using_in_study_uuid(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# request using UUID instead of slug
response = self.client.get(url, {"in_study": self.study.uuid})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
def test_export_output_all(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# force request with big page size to get all in one response
response = self.client.get(url, {"line_id": self.line.pk, "page_size": 50})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertIsNone(response.get("Link"))
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
# one row for header, plus 30 assays/measurements
self.assertEqual(len(table), 31)
def test_export_output_first_page(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# force request with small page_size to see paging of results
response = self.client.get(url, {"line_id": self.line.pk, "page_size": 5})
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertRegex(
response.get("Link"), r'<https?://.*/rest/export/\?.*>; rel="next"'
)
self.assertEqual(response.get("Content-Type"), "text/csv; charset=utf-8")
self._assert_row_is_header_row(table[0])
# one row for header, plus page_size==5 rows
self.assertEqual(len(table), 6)
def test_export_output_last_page(self):
url = reverse("rest:export-list")
self.client.force_authenticate(user=self.admin)
# force request with small page_size to see paging of results
response = self.client.get(
url, {"line_id": self.line.pk, "page_size": 5, "page": 6}
)
# validate
table = self._read_normal_response(response)
self.assertEqual(response.status_code, codes.ok)
self.assertRegex(
response.get("Link"), r'<https?://.*/rest/export/\?.*>; rel="prev"'
)
self.assertEqual(response.get("Content-Type"),
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import shlex
import unittest.mock
from contextlib import contextmanager
from enum import Enum
from functools import partial
from textwrap import dedent
from typing import Any, Callable, Dict, cast
import pytest
import toml
import yaml
from packaging.version import Version
from pants.base.deprecated import CodeRemovedError, warn_or_error
from pants.base.hash_utils import CoercingEncoder
from pants.engine.fs import FileContent
from pants.option.config import Config
from pants.option.custom_types import UnsetBool, file_option, shell_str, target_option
from pants.option.errors import (
BooleanConversionError,
BooleanOptionNameWithNo,
DefaultValueType,
FromfileError,
HelpType,
ImplicitValIsNone,
InvalidKwarg,
InvalidMemberType,
MemberTypeNotAllowed,
MutuallyExclusiveOptionError,
NoOptionNames,
OptionAlreadyRegistered,
OptionNameDash,
OptionNameDoubleDash,
ParseError,
)
from pants.option.global_options import GlobalOptions
from pants.option.option_types import StrOption
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.parser import Parser
from pants.option.ranked_value import Rank, RankedValue
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.option.subsystem import Subsystem
from pants.util.contextutil import temporary_file, temporary_file_path
_FAKE_CUR_VERSION = "1.0.0.dev0"
def global_scope() -> ScopeInfo:
return ScopeInfo(GLOBAL_SCOPE, GlobalOptions)
def task(scope: str) -> ScopeInfo:
return ScopeInfo(scope, is_goal=True)
def intermediate(scope: str) -> ScopeInfo:
return ScopeInfo(scope)
def subsystem(scope: str) -> ScopeInfo:
return ScopeInfo(scope)
def create_options(
scopes: list[str],
register_fn: Callable[[Options], None],
args: list[str] | None = None,
*,
env: dict[str, str] | None = None,
config: dict[str, dict[str, Any]] | None = None,
extra_scope_infos: list[ScopeInfo] | None = None,
) -> Options:
options = Options.create(
env=env or {},
config=Config.load([FileContent("pants.toml", toml.dumps(config or {}).encode())]),
known_scope_infos=[*(ScopeInfo(scope) for scope in scopes), *(extra_scope_infos or ())],
args=["./pants", *(args or ())],
)
register_fn(options)
return options
# ----------------------------------------------------------------------------------------
# Boolean handling.
# ----------------------------------------------------------------------------------------
def register_bool_opts(opts: Options) -> None:
opts.register(GLOBAL_SCOPE, "--default-missing", type=bool)
opts.register(GLOBAL_SCOPE, "--default-true", type=bool, default=True)
opts.register(GLOBAL_SCOPE, "--default-false", type=bool, default=False)
opts.register(GLOBAL_SCOPE, "--unset", type=bool, default=UnsetBool)
opts.register(GLOBAL_SCOPE, "--implicit-true", type=bool, implicit_value=True)
opts.register(GLOBAL_SCOPE, "--implicit-false", type=bool, implicit_value=False)
opts.register(
GLOBAL_SCOPE,
"--implicit-false-default-false",
type=bool,
implicit_value=False,
default=False,
)
opts.register(
GLOBAL_SCOPE, "--implicit-false-default-true", type=bool, implicit_value=False, default=True
)
def test_bool_explicit_values() -> None:
def register(opt: Options) -> None:
opt.register(GLOBAL_SCOPE, "--opt", type=bool)
def assert_val(arg: str, expected: bool) -> None:
global_options = create_options(
[GLOBAL_SCOPE], register, [f"--opt={arg}"]
).for_global_scope()
assert global_options.opt is expected
assert_val("false", False)
assert_val("False", False)
assert_val("true", True)
assert_val("True", True)
def test_bool_defaults() -> None:
opts = create_options([GLOBAL_SCOPE], register_bool_opts).for_global_scope()
assert opts.default_missing is False
assert opts.default_true is True
assert opts.default_false is False
assert opts.unset is None
assert opts.implicit_true is False
assert opts.implicit_false is True
assert opts.implicit_false_default_false is False
assert opts.implicit_false_default_true is True
def test_bool_args() -> None:
opts = create_options(
[GLOBAL_SCOPE],
register_bool_opts,
[
"--default-missing",
"--default-true",
"--default-false",
"--unset",
"--implicit-true",
"--implicit-false",
"--implicit-false-default-false",
"--implicit-false-default-true",
],
).for_global_scope()
assert opts.default_missing is True
assert opts.default_true is True
assert opts.default_false is True
assert opts.unset is True
assert opts.implicit_true is True
assert opts.implicit_false is False
assert opts.implicit_false_default_false is False
assert opts.implicit_false_default_true is False
def test_bool_negate() -> None:
opts = create_options(
[GLOBAL_SCOPE],
register_bool_opts,
[
"--no-default-missing",
"--no-default-true",
"--no-default-false",
"--no-unset",
"--no-implicit-true",
"--no-implicit-false",
"--no-implicit-false-default-false",
"--no-implicit-false-default-true",
],
).for_global_scope()
assert opts.default_missing is False
assert opts.default_true is False
assert opts.default_false is False
assert opts.unset is False
assert opts.implicit_true is False
assert opts.implicit_false is True
assert opts.implicit_false_default_false is True
assert opts.implicit_false_default_true is True
@pytest.mark.parametrize("val", [False, True])
def test_bool_config(val: bool) -> None:
opt_names = (
"default_missing",
"default_true",
"default_false",
"implicit_true",
"implicit_false",
"implicit_false_default_false",
"implicit_false_default_true",
)
opts = create_options(
[GLOBAL_SCOPE], register_bool_opts, config={"GLOBAL": {opt: val for opt in opt_names}}
).for_global_scope()
for opt in opt_names:
assert opts[opt] is val, f"option {opt} has value {opts[opt]} but expected {val}"
@pytest.mark.parametrize("val", (11, "AlmostTrue"))
def test_bool_invalid_value(val: Any) -> None:
def register(opts: Options) -> None:
opts.register(GLOBAL_SCOPE, "--opt", type=bool)
with pytest.raises(BooleanConversionError):
create_options([GLOBAL_SCOPE], register, config={"GLOBAL": {"opt": val}}).for_global_scope()
# ----------------------------------------------------------------------------------------
# Type checks
# ----------------------------------------------------------------------------------------
@contextmanager
def no_exception():
"""use in tests as placeholder for a pytest.raises, when no exception is expected."""
yield None
@pytest.mark.parametrize(
"option_kwargs, assert_expected",
[
(
dict(type=str, default=""),
no_exception(),
),
(
dict(type=str, default=42),
pytest.raises(
DefaultValueType, match=r"Default value int\(42\) does not match option type str\."
),
),
(
dict(type=bool, default="True"),
no_exception(),
),
(
dict(type=bool, default=True),
no_exception(),
),
(
dict(type=bool, default="not a bool"),
pytest.raises(
BooleanConversionError, match=r'Got "not a bool"\. Expected "True" or "False"\.'
),
),
(
dict(type=int, default=1.0),
pytest.raises(
DefaultValueType,
match=r"Default value float\(1\.0\) does not match option type int\. \[option --opt in global scope\]\.",
),
),
(
dict(type=list, member_type=int, default="[1, 2, 3]"),
no_exception(),
),
(
dict(type=list, member_type=int, default="[1, 2.1, 3]"),
pytest.raises(
DefaultValueType,
match=r"Default member value type mismatch\.\n\n Member value float\(2\.1\) does not match list option type int\.",
),
),
(
dict(type=list, member_type=float, default="[1.1, 2.0, 3.3]"),
no_exception(),
),
(
dict(type=list, member_type=float, default="[1.1, 2.2, '3.3']"),
pytest.raises(
DefaultValueType,
match=r"Member value str\('3\.3'\) does not match list option type float\.",
),
),
(
dict(type=dict, default="{'foo': 'bar'}"),
no_exception(),
),
(
dict(type=dict, default="['foo', 'bar']"),
pytest.raises(ParseError, match=r"Invalid dict value: \['foo', 'bar'\]"),
),
],
)
def test_default_value_type_assert(option_kwargs, assert_expected):
def register(opts: Options) -> None:
opts.register(GLOBAL_SCOPE, "--opt", **option_kwargs)
with assert_expected:
create_options([GLOBAL_SCOPE], register).for_scope(GLOBAL_SCOPE)
# ----------------------------------------------------------------------------------------
# Deprecations.
# ----------------------------------------------------------------------------------------
def test_deprecated_options(caplog) -> None:
def register(opts: Options) -> None:
opts.register(
GLOBAL_SCOPE, "--old1", removal_version="999.99.9.dev0", removal_hint="Stop it."
)
opts.register(
GLOBAL_SCOPE,
"--bool1",
type=bool,
removal_version="999.99.9.dev0",
removal_hint="¡Basta!",
)
opts.register("scope", "--valid")
opts.register(
"scope", "--old2", removal_version="999.99.9.dev0", removal_hint="Stop with the scope."
)
opts.register(
"scope",
"--bool2",
type=bool,
removal_version="999.99.9.dev0",
removal_hint="¡Basta but scoped!",
)
def assert_deprecated(
scope: str,
opt: str,
args: list[str],
*,
expected: str | bool,
env: dict[str, str] | None = None,
config: dict[str, dict[str, str]] | None = None,
) -> None:
caplog.clear()
warn_or_error.clear() # type: ignore[attr-defined]
opts = create_options([GLOBAL_SCOPE, "scope"], register, args, env=env, config=config)
assert opts.for_scope(scope)[opt] == expected
assert len(caplog.records) == 1
assert "will be removed in version" in caplog.text
assert opt in caplog.text
assert_deprecated(GLOBAL_SCOPE, "old1", ["--old1=x"], expected="x")
assert_deprecated(GLOBAL_SCOPE, "bool1", ["--bool1"], expected=True)
assert_deprecated(GLOBAL_SCOPE, "bool1", ["--no-bool1"], expected=False)
assert_deprecated("scope", "old2", ["scope", "--old2=x"], expected="x")
assert_deprecated("scope", "old2", ["--scope-old2=x"], expected="x")
assert_deprecated("scope", "bool2", ["scope", "--bool2"], expected=True)
assert_deprecated("scope", "bool2", ["scope", "--no-bool2"], expected=False)
assert_deprecated("scope", "bool2", ["--scope-bool2"], expected=True)
assert_deprecated("scope", "bool2", ["--no-scope-bool2"], expected=False)
assert_deprecated(GLOBAL_SCOPE, "old1", [], env={"PANTS_GLOBAL_OLD1": "x"}, expected="x")
assert_deprecated("scope", "old2", [], env={"PANTS_SCOPE_OLD2": "x"}, expected="x")
assert_deprecated(GLOBAL_SCOPE, "old1", [], config={"GLOBAL": {"old1": "x"}}, expected="x")
assert_deprecated("scope", "old2", [], config={"scope": {"old2": "x"}}, expected="x")
# Make sure the warnings don't come out for regular options.
caplog.clear()
warn_or_error.clear() # type: ignore[attr-defined]
assert (
create_options([GLOBAL_SCOPE, "scope"], register, ["--scope-valid=x"])
.for_scope("scope")
.valid
== "x"
)
assert not caplog.records
def test_deprecated_options_error() -> None:
def register(opts: Options) -> None:
opts.register(GLOBAL_SCOPE, "--expired", removal_version="0.0.1.dev0")
with pytest.raises(CodeRemovedError):
create_options([GLOBAL_SCOPE], register, [])
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_deprecated_options_start_version(caplog) -> None:
def register(opts: Options) -> None:
opts.register(
GLOBAL_SCOPE,
"--delayed",
removal_version="999.99.9.dev0",
deprecation_start_version="500.0.0.dev0",
)
opts.register(
GLOBAL_SCOPE,
"--past-start",
removal_version="999.99.9.dev0",
deprecation_start_version=_FAKE_CUR_VERSION,
)
caplog.clear()
assert (
create_options([GLOBAL_SCOPE], register, ["--delayed=x"]).for_global_scope().delayed == "x"
)
assert not caplog.records
assert (
create_options([GLOBAL_SCOPE], register, ["--past-start=x"]).for_global_scope().past_start
== "x"
)
assert len(caplog.records) == 1
assert "will be removed in version" in caplog.text
assert "past_start" in caplog.text
def test_scope_deprecation(caplog) -> None:
# This test demonstrates that two different new scopes can deprecate the same
# old scope. I.e., it's possible to split an old scope's options among multiple new scopes.
class Subsystem1(Subsystem):
options_scope = "new1"
deprecated_options_scope = "deprecated"
deprecated_options_scope_removal_version = "9999.9.9.dev0"
foo = StrOption("--foo", help="")
bar = StrOption("--bar", help="")
baz = StrOption("--baz", help="")
class Subsystem2(Subsystem):
options_scope = "new2"
deprecated_options_scope = "deprecated"
deprecated_options_scope_removal_version = "9999.9.9.dev0"
qux = StrOption("--qux", help="")
def register(opts: Options) -> None:
opts.register(Subsystem1.options_scope, "--foo")
opts.register(Subsystem1.options_scope, "--bar")
opts.register(Subsystem1.options_scope, "--baz")
opts.register(Subsystem2.options_scope, "--qux")
opts = create_options(
[GLOBAL_SCOPE],
register,
["--new1-baz=vv"],
extra_scope_infos=[Subsystem1.get_scope_info(), Subsystem2.get_scope_info()],
config={
Subsystem1.options_scope: {"foo": "xx"},
Subsystem1.deprecated_options_scope: {
"foo": "yy",
"bar": "zz",
"baz": "ww",
"qux": "uu",
},
},
)
caplog.clear()
vals1 = opts.for_scope(Subsystem1.options_scope)
assert len(caplog.records) == 1
assert Subsystem1.deprecated_options_scope in caplog.text
assert "foo" in caplog.text
# Deprecated scope takes precedence at equal rank, but new scope takes precedence at higher
# rank.
assert vals1.foo == "yy"
assert vals1.bar == "zz"
assert vals1.baz == "vv"
caplog.clear()
vals2 = opts.for_scope(Subsystem2.options_scope)
assert len(caplog.records) == 1
assert Subsystem1.deprecated_options_scope in caplog.text
assert "qux" in caplog.text
assert vals2.qux == "uu"
def test_scope_deprecation_default_config_section(caplog) -> None:
# Confirms that a DEFAULT option does not trigger deprecation warnings for a deprecated scope.
class Subsystem1(Subsystem):
options_scope = "new"
deprecated_options_scope = "deprecated"
deprecated_options_scope_removal_version = "9999.9.9.dev0"
def register(opts: Options) -> None:
opts.register(Subsystem1.options_scope, "--foo")
opts = create_options(
[GLOBAL_SCOPE],
register,
[],
extra_scope_infos=[Subsystem1.get_scope_info()],
config={"DEFAULT": {"foo": "aa"}, Subsystem1.options_scope: {"foo": "xx"}},
)
caplog.clear()
assert opts.for_scope(Subsystem1.options_scope).foo == "xx"
assert not caplog.records
# ----------------------------------------------------------------------------------------
# Legacy Unittest TestCase.
# ----------------------------------------------------------------------------------------
class OptionsTest(unittest.TestCase):
@staticmethod
def _create_config(config: dict[str, dict[str, str]] | None = None) -> Config:
return Config.load([FileContent("test_config.toml", toml.dumps(config or {}).encode())])
def _parse(
self,
*,
flags: str = "",
env: dict[str, str] | None = None,
config: dict[str, dict[str, Any]] | None = None,
bootstrap_option_values=None,
) -> Options:
args = ["./pants", *shlex.split(flags)]
options = Options.create(
env=env or {},
config=self._create_config(config),
known_scope_infos=OptionsTest._known_scope_infos,
args=args,
bootstrap_option_values=bootstrap_option_values,
)
self._register(options)
return options
_known_scope_infos = [
ScopeInfo(scope)
for scope in (
GLOBAL_SCOPE,
"anotherscope",
"compile",
"compile.java",
"stale",
"test",
"test.junit",
"passconsumer",
"simple",
"simple-dashed",
"scoped.a.bit",
"scoped.and-dashed",
"fromfile",
"fingerprinting",
"enum-opt",
"separate-enum-opt-scope",
"other-enum-scope",
)
    ]
"""
qzone.items
~~~~~~~~~~~
This module implements the items for qzone scraping.
:copyright: (c) 2017 by <NAME>.
:date: 2017/10/27.
:license: MIT License, see LICENSE.txt for more details.
"""
from lib.basis import SocialMediaItem
class QzoneItem(SocialMediaItem):
pass
class QzoneUserItem(QzoneItem):
def __init__(self):
        self._qq = 0  # QQ number
        self._name = ''  # nickname
def __str__(self):
return 'QQ: ' + str(self.qq) + '; Name: ' + str(self.name)
def __hash__(self):
return hash(self.qq)
@property
def qq(self):
return self._qq
@qq.setter
def qq(self, value):
if not isinstance(value, int):
raise TypeError('Attribute \'qq\' should be an instance of type \'int\'. '
'Found: %s.' % type(value))
        if value < 0:
            raise ValueError('Attribute \'qq\' should be non-negative.')
        self._qq = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'name\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._name = value
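# A minimal usage sketch (illustrative only; the QQ number and nickname below are
# made-up values, not part of the original scraper): the item classes are plain
# containers whose property setters validate types before assignment.
def _example_build_user():
    user = QzoneUserItem()
    user.qq = 123456          # must be a non-negative int
    user.name = 'example'     # must be a str
    return str(user)          # -> 'QQ: 123456; Name: example'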
class QzoneEmotionItem(QzoneItem):
def __init__(self):
        self._id = ''  # post (shuoshuo) ID
        self._owner = QzoneUserItem()  # owner
        self._time = ''  # time
        self._content = ''  # content
        self.pictures = []  # list of pictures
        self._source = ''  # device name
        self._location = ''  # location
        self.visitors = []  # list of visitors
        self.likers = []  # list of likers
        self.comments = []  # list of comments
def __str__(self):
string = ''
string += 'ID: ' + self._id + '\n'
string += 'Owner: ' + str(self._owner) + '\n'
string += 'Time: ' + self._time + '\n'
string += 'Content: ' + str(self._content) + '\n'
string += 'Pictures: ' + '; '.join([str(pic) for pic in self.pictures]) + '\n'
string += 'Source: ' + str(self._source) + '\n'
string += 'Location: ' + str(self._location) + '\n'
string += 'Visitor Count: ' + str(len(self.visitors)) + '\n'
string += 'Liker Count: ' + str(len(self.likers)) + '\n'
string += 'Comment Count: ' + str(len(self.comments)) + '\n'
return string
def __hash__(self):
return hash(self.id)
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'id\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._id = value
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
if not isinstance(value, QzoneUserItem):
raise TypeError('Attribute \'owner\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._owner = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'time\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._time = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'content\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._content = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'source\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._source = value
@property
def location(self):
return self._location
@location.setter
def location(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'location\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._location = value
class QzoneRepostEmotionItem(QzoneEmotionItem):
def __init__(self):
QzoneEmotionItem.__init__(self)
        self._repost_source = QzoneUserItem()  # repost source
        self._repost_reason = ''  # repost reason
def __str__(self):
string = QzoneEmotionItem.__str__(self)
string += 'Repost Source: ' + str(self._repost_source) + '\n'
string += 'Repost Reason: ' + self._repost_reason + '\n'
return string
def __hash__(self):
return hash(self.id)
@property
def repost_source(self):
return self._repost_source
@repost_source.setter
def repost_source(self, value):
if not isinstance(value, QzoneUserItem):
            raise TypeError('Attribute \'repost_source\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._repost_source = value
@property
def repost_reason(self):
return self._repost_reason
@repost_reason.setter
def repost_reason(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'repost_reason\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._repost_reason = value
class QzoneCommentItem(QzoneItem):
def __init__(self):
        self._commenter = QzoneUserItem()  # commenter
        self._time = ''  # comment time
        self._content = ''  # comment content
        self.pictures = []  # list of comment pictures
        self.replies = []  # list of replies to the comment
def __str__(self):
string = ''
string += 'Commenter: ' + str(self._commenter) + '\n'
string += 'Time: ' + self._time + '\n'
string += 'Content: ' + str(self._content) + '\n'
string += 'Pictures: ' + '; '.join([str(pic) for pic in self.pictures]) + '\n'
string += 'Reply Number: ' + str(len(self.replies)) + '\n'
return string
def __hash__(self):
return hash(self._content)
@property
def commenter(self):
return self._commenter
@commenter.setter
def commenter(self, value):
        if not isinstance(value, QzoneUserItem):
            raise TypeError('Attribute \'commenter\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._commenter = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'time\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._time = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'content\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._content = value
class QzoneCommentReplyItem(QzoneItem):
def __init__(self):
        self._replier = QzoneUserItem()  # replier
        self._replyto = QzoneUserItem()  # user being replied to
        self._time = ''  # reply time
        self._content = ''  # reply content
def __str__(self):
return self._time + ' ' + self._replier.name + ' reply to ' + self._replyto.name + ': ' + self._content
def __hash__(self):
return hash(self._content)
@property
def replier(self):
return self._replier
@replier.setter
def replier(self, value):
        if not isinstance(value, QzoneUserItem):
            raise TypeError('Attribute \'replier\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._replier = value
@property
def replyto(self):
return self._replyto
@replyto.setter
def replyto(self, value):
        if not isinstance(value, QzoneUserItem):
            raise TypeError('Attribute \'replyto\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._replyto = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'time\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._time = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'content\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._content = value
class QzoneMessageItem(QzoneItem):
def __init__(self):
        self._id = ''  # message ID
        self._owner = QzoneUserItem()  # owner
        self._poster = QzoneUserItem()  # poster
        self._time = ''  # message time
        self._content = ''  # message content
        self.replies = []  # list of replies to the message
def __str__(self):
string = ''
string += 'Owner: ' + str(self._owner) + '\n'
string += 'Poster: ' + str(self._poster) + '\n'
string += 'Time: ' + self._time + '\n'
string += 'Content: ' + self._content + '\n'
string += 'Replies: ' + '; '.join([str(reply) for reply in self.replies]) + '\n'
return string
def __hash__(self):
return hash(self._id)
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'id\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._id = value
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
if not isinstance(value, QzoneUserItem):
raise TypeError('Attribute \'owner\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._owner = value
@property
def poster(self):
return self._poster
@poster.setter
def poster(self, value):
if not isinstance(value, QzoneUserItem):
raise TypeError('Attribute \'poster\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._poster = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'time\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._time = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'content\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._content = value
class QzoneMessageReplyItem(QzoneItem):
def __init__(self):
        self._replier = QzoneUserItem()  # replier
        self._time = ''  # reply time
        self._content = ''  # reply content
def __str__(self):
return self._time + ' ' + self._replier.name + ' replied: ' + self._content
def __hash__(self):
return hash(self.content)
@property
def replier(self):
return self._replier
@replier.setter
def replier(self, value):
        if not isinstance(value, QzoneUserItem):
            raise TypeError('Attribute \'replier\' should be an instance of type \'QzoneUserItem\'. '
'Found: %s.' % type(value))
self._replier = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
if not isinstance(value, str):
raise TypeError('Attribute \'time\' should be an instance of type \'str\'. '
'Found: %s.' % type(value))
self._time = value
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if not isinstance(value, str):
            raise TypeError('Attribute \'content\' should be an instance of type \'str\'. '
                            'Found: %s.' % type(value))
        self._content = value
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating PipelineSpec proto objects."""
import collections
import json
import re
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import kfp
from google.protobuf import json_format
from google.protobuf import struct_pb2
from kfp import dsl
from kfp.compiler import pipeline_spec_builder as builder
from kfp.components import for_loop
from kfp.components import pipeline_channel
from kfp.components import pipeline_task
from kfp.components import placeholders
from kfp.components import structures
from kfp.components import tasks_group
from kfp.components import utils
from kfp.components import utils as component_utils
from kfp.components.types import artifact_types
from kfp.components.types import type_utils
from kfp.pipeline_spec import pipeline_spec_pb2
GroupOrTaskType = Union[tasks_group.TasksGroup, pipeline_task.PipelineTask]
def _additional_input_name_for_pipeline_channel(
channel_or_name: Union[pipeline_channel.PipelineChannel, str]) -> str:
"""Gets the name for an additional (compiler-injected) input."""
# Adding a prefix to avoid (reduce chance of) name collision between the
# original component inputs and the injected input.
return 'pipelinechannel--' + (
channel_or_name.full_name if isinstance(
channel_or_name, pipeline_channel.PipelineChannel) else
channel_or_name)
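# For example (hypothetical channel name, for illustration only): a PipelineChannel
# whose full_name is 'train-op-Output' would be surfaced on the consuming component
# as the injected input 'pipelinechannel--train-op-Output'.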
def _to_protobuf_value(value: type_utils.PARAMETER_TYPES) -> struct_pb2.Value:
"""Creates a google.protobuf.struct_pb2.Value message out of a provide
value.
Args:
value: The value to be converted to Value message.
Returns:
A google.protobuf.struct_pb2.Value message.
Raises:
ValueError if the given value is not one of the parameter types.
"""
if isinstance(value, str):
return struct_pb2.Value(string_value=value)
    elif isinstance(value, bool):
        # Check bool before int/float: bool is a subclass of int, so the
        # (int, float) branch would otherwise capture booleans.
        return struct_pb2.Value(bool_value=value)
    elif isinstance(value, (int, float)):
        return struct_pb2.Value(number_value=value)
elif isinstance(value, dict):
return struct_pb2.Value(
struct_value=struct_pb2.Struct(
fields={k: _to_protobuf_value(v) for k, v in value.items()}))
elif isinstance(value, list):
return struct_pb2.Value(
list_value=struct_pb2.ListValue(
values=[_to_protobuf_value(v) for v in value]))
else:
raise ValueError('Value must be one of the following types: '
'str, int, float, bool, dict, and list. Got: '
f'"{value}" of type "{type(value)}".')
def build_task_spec_for_task(
task: pipeline_task.PipelineTask,
parent_component_inputs: pipeline_spec_pb2.ComponentInputsSpec,
tasks_in_current_dag: List[str],
input_parameters_in_current_dag: List[str],
input_artifacts_in_current_dag: List[str],
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Builds PipelineTaskSpec for a pipeline task.
A task input may reference an output outside its immediate DAG.
For instance::
random_num = random_num_op(...)
with dsl.Condition(random_num.output > 5):
print_op('%s > 5' % random_num.output)
In this example, `dsl.Condition` forms a subDAG with one task from `print_op`
inside the subDAG. The task of `print_op` references output from `random_num`
task, which is outside the sub-DAG. When compiling to IR, such cross DAG
reference is disallowed. So we need to "punch a hole" in the sub-DAG to make
    the input available in the sub-DAG component inputs if it is not already there.
    Next, we can call this method to fix the tasks inside the sub-DAG to make them
reference the component inputs instead of directly referencing the original
producer task.
Args:
task: The task to build a PipelineTaskSpec for.
parent_component_inputs: The task's parent component's input specs.
tasks_in_current_dag: The list of tasks names for tasks in the same dag.
input_parameters_in_current_dag: The list of input parameters in the DAG
component.
input_artifacts_in_current_dag: The list of input artifacts in the DAG
component.
Returns:
A PipelineTaskSpec object representing the task.
"""
pipeline_task_spec = pipeline_spec_pb2.PipelineTaskSpec()
pipeline_task_spec.task_info.name = (
task.task_spec.display_name or task.name)
# Use task.name for component_ref.name because we may customize component
# spec for individual tasks to work around the lack of optional inputs
# support in IR.
pipeline_task_spec.component_ref.name = (
component_utils.sanitize_component_name(task.name))
pipeline_task_spec.caching_options.enable_cache = (
task.task_spec.enable_caching)
for input_name, input_value in task.inputs.items():
if isinstance(input_value, pipeline_channel.PipelineArtifactChannel):
if input_value.task_name:
# Value is produced by an upstream task.
if input_value.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task = (
component_utils.sanitize_task_name(
input_value.task_name))
pipeline_task_spec.inputs.artifacts[
input_name].task_output_artifact.output_artifact_key = (
input_value.name)
else:
# Dependent task not from the same DAG.
component_input_artifact = (
_additional_input_name_for_pipeline_channel(input_value)
)
assert component_input_artifact in parent_component_inputs.artifacts, \
'component_input_artifact: {} not found. All inputs: {}'.format(
component_input_artifact, parent_component_inputs)
pipeline_task_spec.inputs.artifacts[
input_name].component_input_artifact = (
component_input_artifact)
else:
raise RuntimeError(
f'Artifacts must be produced by a task. Got {input_value}.')
elif isinstance(input_value, pipeline_channel.PipelineParameterChannel):
if input_value.task_name:
# Value is produced by an upstream task.
if input_value.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.producer_task = (
component_utils.sanitize_task_name(
input_value.task_name))
pipeline_task_spec.inputs.parameters[
input_name].task_output_parameter.output_parameter_key = (
input_value.name)
else:
# Dependent task not from the same DAG.
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value)
)
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
else:
# Value is from pipeline input.
component_input_parameter = input_value.full_name
if component_input_parameter not in parent_component_inputs.parameters:
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value)
)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
elif isinstance(input_value, for_loop.LoopArgument):
component_input_parameter = (
_additional_input_name_for_pipeline_channel(input_value))
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
elif isinstance(input_value, for_loop.LoopArgumentVariable):
component_input_parameter = (
_additional_input_name_for_pipeline_channel(
input_value.loop_argument))
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
input_name].component_input_parameter = (
component_input_parameter)
pipeline_task_spec.inputs.parameters[
input_name].parameter_expression_selector = (
'parseJson(string_value)["{}"]'.format(
input_value.subvar_name))
elif isinstance(input_value, str):
# Handle extra input due to string concat
pipeline_channels = (
pipeline_channel.extract_pipeline_channels_from_any(input_value)
)
for channel in pipeline_channels:
# value contains PipelineChannel placeholders which needs to be
# replaced. And the input needs to be added to the task spec.
# Form the name for the compiler injected input, and make sure it
# doesn't collide with any existing input names.
additional_input_name = (
_additional_input_name_for_pipeline_channel(channel))
# We don't expect collision to happen because we prefix the name
# of additional input with 'pipelinechannel--'. But just in case
                # a collision did happen, throw a RuntimeError so that we don't
                # get surprised at runtime.
for existing_input_name, _ in task.inputs.items():
if existing_input_name == additional_input_name:
raise RuntimeError(
'Name collision between existing input name '
'{} and compiler injected input name {}'.format(
existing_input_name, additional_input_name))
additional_input_placeholder = (
placeholders.input_parameter_placeholder(
additional_input_name))
input_value = input_value.replace(channel.pattern,
additional_input_placeholder)
if channel.task_name:
# Value is produced by an upstream task.
if channel.task_name in tasks_in_current_dag:
# Dependent task within the same DAG.
pipeline_task_spec.inputs.parameters[
additional_input_name].task_output_parameter.producer_task = (
component_utils.sanitize_task_name(
channel.task_name))
pipeline_task_spec.inputs.parameters[
                            additional_input_name].task_output_parameter.output_parameter_key = (
channel.name)
else:
# Dependent task not from the same DAG.
component_input_parameter = (
_additional_input_name_for_pipeline_channel(channel)
)
assert component_input_parameter in parent_component_inputs.parameters, \
'component_input_parameter: {} not found. All inputs: {}'.format(
component_input_parameter, parent_component_inputs)
pipeline_task_spec.inputs.parameters[
additional_input_name].component_input_parameter = (
component_input_parameter)
else:
# Value is from pipeline input. (or loop?)
component_input_parameter = channel.full_name
if component_input_parameter not in parent_component_inputs.parameters:
component_input_parameter = (
_additional_input_name_for_pipeline_channel(channel)
)
pipeline_task_spec.inputs.parameters[
additional_input_name].component_input_parameter = (
component_input_parameter)
pipeline_task_spec.inputs.parameters[
input_name].runtime_value.constant.string_value = input_value
elif isinstance(input_value, (str, int, float, bool, dict, list)):
pipeline_task_spec.inputs.parameters[
input_name].runtime_value.constant.CopyFrom(
_to_protobuf_value(input_value))
else:
raise ValueError(
'Input argument supports only the following types: '
            'str, int, float, bool, dict, and list. '
f'Got {input_value} of type {type(input_value)}.')
return pipeline_task_spec
def build_component_spec_for_exit_task(
task: pipeline_task.PipelineTask,) -> pipeline_spec_pb2.ComponentSpec:
"""Builds ComponentSpec for an exit task.
Args:
task: The task to build a ComponentSpec for.
Returns:
A ComponentSpec object for the exit task.
"""
return build_component_spec_for_task(task=task, is_exit_task=True)
def build_component_spec_for_task(
task: pipeline_task.PipelineTask,
is_exit_task: bool = False,
) -> pipeline_spec_pb2.ComponentSpec:
"""Builds ComponentSpec for a pipeline task.
Args:
task: The task to build a ComponentSpec for.
is_exit_task: Whether the task is used as exit task in Exit Handler.
Returns:
A ComponentSpec object for the task.
"""
component_spec = pipeline_spec_pb2.ComponentSpec()
component_spec.executor_label = component_utils.sanitize_executor_label(
task.name)
for input_name, input_spec in (task.component_spec.inputs or {}).items():
# Special handling for PipelineTaskFinalStatus first.
if type_utils.is_task_final_status_type(input_spec.type):
if not is_exit_task:
raise ValueError(
'PipelineTaskFinalStatus can only be used in an exit task.')
component_spec.input_definitions.parameters[
input_name].parameter_type = pipeline_spec_pb2.ParameterType.STRUCT
continue
# skip inputs not present, as a workaround to support optional inputs.
if input_name not in task.inputs and input_spec.default is None:
continue
if type_utils.is_parameter_type(input_spec.type):
component_spec.input_definitions.parameters[
input_name].parameter_type = type_utils.get_parameter_type(
input_spec.type)
if input_spec.default is not None:
component_spec.input_definitions.parameters[
input_name].default_value.CopyFrom(
_to_protobuf_value(input_spec.default))
else:
component_spec.input_definitions.artifacts[
input_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema(input_spec.type))
for output_name, output_spec in (task.component_spec.outputs or {}).items():
if type_utils.is_parameter_type(output_spec.type):
component_spec.output_definitions.parameters[
output_name].parameter_type = type_utils.get_parameter_type(
output_spec.type)
else:
component_spec.output_definitions.artifacts[
output_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema(output_spec.type))
return component_spec
def build_importer_spec_for_task(
task: pipeline_task.PipelineTask
) -> pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec:
"""Builds ImporterSpec for a pipeline task.
Args:
task: The task to build a ComponentSpec for.
Returns:
A ImporterSpec object for the task.
"""
type_schema = type_utils.get_artifact_type_schema(
task.importer_spec.type_schema)
importer_spec = pipeline_spec_pb2.PipelineDeploymentConfig.ImporterSpec(
type_schema=type_schema, reimport=task.importer_spec.reimport)
if task.importer_spec.metadata:
metadata_protobuf_struct = struct_pb2.Struct()
metadata_protobuf_struct.update(task.importer_spec.metadata)
importer_spec.metadata.CopyFrom(metadata_protobuf_struct)
if isinstance(task.importer_spec.artifact_uri,
pipeline_channel.PipelineParameterChannel):
importer_spec.artifact_uri.runtime_parameter = 'uri'
    elif isinstance(task.importer_spec.artifact_uri, str):
        importer_spec.artifact_uri.constant.string_value = (
            task.importer_spec.artifact_uri)
    return importer_spec
# repository: zhjpqq/scaledensenet
# -*- coding: utf-8 -*-
__author__ = 'ooo'
__date__ = '2019/6/9 12:17'
"""
Multi-Resolution Net 2019-6-20 20:58
"""
from collections import OrderedDict
import math
import torch
from torch import nn
from torch.nn import functional as F
from xmodules.classifier import AdaPoolView, ReturnX
import xtils
from xtils import GCU
class HSigmoid(nn.Module):
def __init__(self, inplace=True):
super(HSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
out = F.relu6(x + 3, self.inplace) / 6
return out
def hsigmoid(x):
out = F.relu6(x + 3, inplace=True) / 6
return out
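# Note: hsigmoid(x) = ReLU6(x + 3) / 6 is the "hard" sigmoid used in MobileNetV3-style
# blocks: it is 0 for x <= -3, 1 for x >= 3, and linear in between, approximating the
# sigmoid with cheap piecewise-linear operations.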
class SeModule(nn.Module):
_ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}
def __init__(self, indepth, reduction=4, active='hsig'):
super(SeModule, self).__init__()
"""
Squeeze-> x ->Expand, x => [batch, channels, 1, 1]
"""
assert active in self._ActiveFuc.keys()
Active = self._ActiveFuc[active]
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(indepth, indepth // reduction, 1, 1, 0, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(indepth // reduction, indepth, 1, 1, 0, bias=False),
Active(inplace=True)
)
def forward(self, x):
return x * self.se(x)
class BranchDownsize(nn.Module):
def __init__(self, factor=None, size=None, mode='nearest', align_corners=False):
super(BranchDownsize, self).__init__()
self.downsize = nn.Upsample(size, factor, mode, align_corners)
def forward(self, x):
if isinstance(x, (tuple, list)):
x3, x2, x1 = x
x3 = self.downsize(x3)
x2 = self.downsize(x2)
x1 = self.downsize(x1)
x = (x3, x2, x1)
else:
x = self.downsize(x)
# print('---->', x[0].size())
return x
def drop_connect(inputs, p, training):
""" Drop connect. """
if not training or p == 0:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
    random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)  # uniform [0,1)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
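# Hedged sanity-check sketch for drop_connect (not part of the original repo): each
# sample is kept with probability 1 - p and rescaled by 1 / (1 - p), so the expected
# output equals the input during training; dropped samples are all-zero.
def _example_drop_connect(p=0.2):
    x = torch.ones(8, 4, 2, 2)
    y = drop_connect(x, p, training=True)
    # per-sample sums: either 0 (dropped) or 16 / (1 - p) (kept and rescaled)
    return y.view(8, -1).sum(dim=1)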
class PreProc(nn.Module):
def __init__(self, indepth=3, outdepth=16, outnums=1, stride_dilate='1/4-1'):
super(PreProc, self).__init__()
assert outnums in [1, 2, 3]
assert stride_dilate in ['1/2-1', '1/2-2', '1/4-1', '1/4-2']
stride, dilate = stride_dilate.split('-')
self.stride = stride
self.outnums = outnums
# stride = 1/2
if dilate == '1':
self.conv1 = nn.Conv2d(indepth, outdepth, 3, 2, 1, dilation=1, bias=False)
else:
self.conv1 = nn.Conv2d(indepth, outdepth, 3, 2, 2, dilation=2, bias=False)
self.bn1 = nn.BatchNorm2d(outdepth)
self.act1 = nn.ReLU()
if stride == '1/4':
if dilate == '1':
self.conv2 = nn.Conv2d(outdepth, outdepth, 3, 2, 1, dilation=1, bias=False)
else:
self.conv2 = nn.Conv2d(outdepth, outdepth, 3, 2, 2, dilation=2, bias=False)
self.bn2 = nn.BatchNorm2d(outdepth)
self.act2 = nn.ReLU()
def forward(self, x):
# stride = '1/2'
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
if self.stride == '1/4':
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
if self.outnums == 1:
return x
elif self.outnums == 2:
return x, None
elif self.outnums == 3:
return x, None, None
class MoBlock(nn.Module):
_ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}
def __init__(self, indepth, outdepth, growth, pre_ind_grow, ksp='3.1.1', pre_ksp_half=False, groups='auto',
skgroups='gcu', active='relu', dropout=0.0, isse=1, seactive='hsig', first=False, idx=1):
"""
        - indepth: number of input channels of the current block.
        - outdepth: number of output channels of the current block.
        - growth: number of channels the current block grows by.
        - pre_ind_grow: value of indepth + growth in the previous block.
        - ksp: kernel_size, stride, padding of the depth-wise convolution; cur_ksp_half means the current block halves the feature map.
        - pre_ksp_half: whether the previous block halved the feature map.
        - groups: groups value of the depth-wise convolution.
        - skgroups: groups value of all skip connections (skip-groups).
        - active: key of the activation function ('relu', 'hsig' or 'relu6').
        - dropout: dropout rate applied after the point-wise convolution.
        - isse: whether to include an SeModule. =1: no SeModule; >1: SeModule with reduction=isse (a typical value is 4).
        - first: whether this is the first block (skip1/skip2 are identity).
        - idx: index of the block (used only for debugging).
"""
super(MoBlock, self).__init__()
Active = self._ActiveFuc[active]
ksp = [int(x) for x in ksp.split(sep='.')]
assert len(ksp) == 3
cur_ksp_half = bool(ksp[1] == 2)
self.ksp = ksp
assert dropout * (0.5 - dropout) >= 0, '<dropout> must be in [0, 0.5], but get %s .' % dropout
self.dropout = dropout
assert isse >= 1 and isinstance(isse, int), '<isse> must be a int >=1, but get %s .' % isse
self.isse = isse
self.first = first
self.idx = idx
if groups == '1x':
groups = 1
elif groups == 'auto':
groups = indepth + growth
curr_ind_grow = indepth + growth
self.conv1 = nn.Conv2d(indepth, curr_ind_grow, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1)
self.act1 = Active(inplace=True)
# depth-wise conv
self.conv2 = nn.Conv2d(curr_ind_grow, curr_ind_grow, ksp[0], ksp[1], ksp[2], groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1)
self.act2 = Active(inplace=True)
self.segate2 = [nn.Sequential(), SeModule(curr_ind_grow, isse, active=seactive)][isse != 1]
        # compute skip1 & skip2
if self.first:
self.skip1 = nn.Sequential()
self.skip2 = nn.Sequential()
else:
if curr_ind_grow == pre_ind_grow:
skip_group = GCU(pre_ind_grow, curr_ind_grow) if skgroups == 'gcu' else 1
if not pre_ksp_half:
# print('init---> idx %s .' % idx)
self.skip1 = nn.Sequential()
else:
skip1_ksp = (2, 2, 0)
self.skip1 = nn.Sequential(
nn.Conv2d(pre_ind_grow, curr_ind_grow, skip1_ksp[0], skip1_ksp[1], skip1_ksp[2],
bias=False, groups=skip_group),
nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))
if not cur_ksp_half:
self.skip2 = nn.Sequential()
else:
skip2_ksp = (2, 2, 0)
self.skip2 = nn.Sequential(
nn.Conv2d(pre_ind_grow, curr_ind_grow, skip2_ksp[0], skip2_ksp[1], skip2_ksp[2],
bias=False, groups=skip_group),
nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))
elif curr_ind_grow != pre_ind_grow:
skip_group = GCU(pre_ind_grow, curr_ind_grow) if skgroups == 'gcu' else 1
skip1_ksp = (2, 2, 0) if pre_ksp_half else (1, 1, 0)
skip2_ksp = (2, 2, 0) if cur_ksp_half else (1, 1, 0)
self.skip1 = nn.Sequential(
nn.Conv2d(pre_ind_grow, curr_ind_grow, skip1_ksp[0], skip1_ksp[1], skip1_ksp[2],
bias=False, groups=skip_group),
nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))
self.skip2 = nn.Sequential(nn.Conv2d(pre_ind_grow, curr_ind_grow, skip2_ksp[0], skip2_ksp[1],
skip2_ksp[2], bias=False, groups=skip_group),
nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))
        # compute skip3
if outdepth == indepth and not cur_ksp_half:
self.skip3 = nn.Sequential()
else:
skip3_ksp = (2, 2, 0) if cur_ksp_half else (1, 1, 0)
skip_group = GCU(indepth, outdepth) if skgroups == 'gcu' else 1
self.skip3 = nn.Sequential(nn.Conv2d(indepth, outdepth, skip3_ksp[0], skip3_ksp[1], skip3_ksp[2],
bias=False, groups=skip_group),
nn.BatchNorm2d(outdepth, eps=1e-05, momentum=0.1))
# point-wise conv
self.conv3 = nn.Conv2d(curr_ind_grow, outdepth, kernel_size=1, stride=1, padding=0, groups=1, bias=False)
self.bn3 = nn.BatchNorm2d(outdepth, eps=1e-05, momentum=0.1)
self.act3 = Active(inplace=True)
self.drop3 = nn.Dropout2d(p=dropout, inplace=False)
def forward(self, x):
# print('\n-----> %s' % self.idx)
assert isinstance(x, (list, tuple)) and len(x) == 3
x3, x2, x1 = x # c3, c2, c1
if self.first:
c1 = self.act1(self.bn1(self.conv1(x3)))
c2 = self.act2(self.bn2(self.conv2(c1)))
c2 = self.segate2(c2)
c3 = self.act3(self.bn3(self.conv3(c2)))
c3 = self.drop3(c3)
c3 = c3 + self.skip3(x3)
# return c3, c2, c1
else:
c1 = self.act1(self.bn1(self.conv1(x3)))
c2 = self.act2(self.bn2(self.conv2(c1 + self.skip1(x1))))
c2 = self.segate2(c2)
c3 = self.act3(self.bn3(self.conv3(c2 + self.skip2(x2))))
c3 = self.drop3(c3)
c3 = c3 + self.skip3(x3)
# return c3, c2, c1
# xtils.print_size(c3)
return c3, c2, c1
class Clssifier(nn.Module):
_ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}
def __init__(self, indepth, middepth=0, outdepth=1000, dropout=(0,), active='relu'):
super(Clssifier, self).__init__()
assert isinstance(dropout, (list, tuple))
self.dropout = dropout
self.middepth = middepth
if middepth == 0:
assert len(self.dropout) >= 1
self.drop = nn.Dropout(p=self.dropout[0], inplace=False)
self.fc = nn.Linear(indepth, outdepth)
elif middepth > 0:
assert len(self.dropout) == 2
self.drop1 = nn.Dropout(p=self.dropout[0], inplace=False)
self.fc1 = nn.Linear(indepth, middepth)
self.drop2 = nn.Dropout(p=self.dropout[1], inplace=False)
self.fc2 = nn.Linear(middepth, outdepth)
def forward(self, x):
if self.middepth == 0:
x = self.drop(x)
x = self.fc(x)
elif self.middepth > 0:
x = self.drop1(x)
x = self.fc1(x)
x = self.drop2(x)
x = self.fc2(x)
return x
class ConcatSummary(nn.Module):
"""
    Aggregate the outputs of multiple xfc heads into a single fc, or aggregate the outputs of multiple squeeze branches into a single fc.
"""
def __init__(self, indepth, middepth=0, outdepth=1000, dropout=(0, 0), active='relu', with_fc=True):
"""
        - indepth: number of input channels after concatenating all inputs x
        - middepth: hidden width of the classifier; =0 means no hidden layer
        - outdepth: number of output channels => nlabels
        - dropout: dropout rate(s) of the classifier layers
        - active: activation function of the classifier layers
        - with_fc: when indepth==outdepth, False => no fc layer is added and the concatenated vector is used directly for classification.
"""
super(ConcatSummary, self).__init__()
if not with_fc:
assert indepth == outdepth, '<withfc> can be False only under <indepth>==<outdepth>.'
self.classifier = nn.Sequential()
else:
self.classifier = Clssifier(indepth, middepth, outdepth, dropout, active)
def forward(self, x):
# assert isinstance(x, (tuple, list))
x = torch.cat(x, dim=1)
x = self.classifier(x)
return x
def __repr__(self):
strme = '(\n (concat): torch.cat(dim=1)()\n' + \
' (classifier): ' + self.classifier.__repr__() + '\n)'
return strme
class PollSummary(nn.Module):
"""
    Aggregate the outputs of multiple xfc heads by voting ==> average voting & max voting.
    Before voting, the outputs can optionally be normalized with F.softmax() or F.normalize().
"""
def __init__(self, method='avg', isnorm='none'):
super(PollSummary, self).__init__()
assert isnorm in ['none', 'softmax', 'normal', 'minmax']
self.isnorm = isnorm
self.method = method
if isnorm == 'none':
self.normalize = None
elif isnorm == 'softmax':
self.normalize = F.softmax
elif isnorm == 'normal':
self.normalize = F.normalize
elif isnorm == 'minmax':
self.normalize = self.minmax
else:
raise NotImplementedError
if method == 'avg':
self.reduce = torch.mean
elif method == 'max':
self.reduce = torch.max
elif method == 'sum':
self.reduce = torch.sum
else:
raise NotImplementedError
def minmax(self, x, dim=-1):
assert x.ndimension() == 2
min_x, max_x = x.min(dim)[0], x.max(dim)[0]
factor = (max_x - min_x).unsqueeze(dim)
x = (x - min_x.unsqueeze(dim)) / factor
return x
def forward(self, x):
assert isinstance(x, (tuple, list))
if self.isnorm != 'none':
x = [self.normalize(z, dim=-1) for z in x]
x = [z.unsqueeze_(dim=-1) for z in x]
x = torch.cat(x, dim=-1)
x = self.reduce(x, dim=-1)
return x
def __repr__(self):
string = self.__class__.__name__ + \
'(method={}, isnorm={})'.format(self.method, self.isnorm)
return string
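# Hedged usage sketch of PollSummary (the logits below are made-up values):
# softmax-normalize the per-head logits, then average them across heads.
def _example_poll_summary():
    poll = PollSummary(method='avg', isnorm='softmax')
    logits_a = torch.tensor([[2.0, 0.5, 0.1]])
    logits_b = torch.tensor([[1.5, 1.0, 0.2]])
    return poll([logits_a, logits_b])   # shape (1, 3); each row sums to ~1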
class MultiScaleNet(nn.Module):
    def
import os
import requests
import time
import json
import io
import numpy as np
import pandas as pd
import paavo_queries as paavo_queries
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
## NOTE: Table 9_koko access is forbidden from the API for some reason.
# url to the API
MAIN_PAAVO_URL = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/'
def paavo_url(level, table):
"""Helper to make url to the paavo API"""
return MAIN_PAAVO_URL + str(level) + '/' + table
def fetch_csv(url, destination_directory, file_name, query={"query": [], "response": {"format": "csv"}}):
"""Fetch a single file from PXweb API. File name should end with '.csv'"""
response = requests.post(url, json=query, stream=True, allow_redirects=True)
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
destination_file = os.path.join(destination_directory, file_name)
if response.status_code == 200:
open(destination_file, 'wb').write(response.content)
print('Downloaded ' + file_name + ' from ' + url)
else:
print('Could not download ' + file_name + ' from ' + url)
print('HTTP/1.1 ' + str(response.status_code))
time.sleep(1)
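# Hedged usage sketch (the level and table name below are placeholders, not verified
# PXWeb table ids):
#   fetch_csv(paavo_url(2017, 'example_table.px'), 'paavo_raw', 'example_table.csv')
# posts the CSV query, writes the response under paavo_raw/, and sleeps 1s between
# requests to stay polite to the PXWeb API.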
def fetch_paavo(destination_directory):
"""Fetch the whole Paavo directory"""
# Getting levels from Paavo database
levels = []
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/')
response_texts = json.loads(response.text)
for response_text in response_texts:
levels.append(str(response_text['id']))
paavo_directory = os.path.join(destination_directory, 'paavo_raw')
for level in levels:
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/' + str(level))
response_texts = json.loads(response.text)
table_data = {}
for response_text in response_texts:
table_data[response_text['id']] = str(response_text['text']).split('. ')[-1].replace("'", "").replace(" ", "_")
for (id, name) in table_data.items():
url = paavo_url(level, id)
file_name = name + '.csv'
fetch_csv(url, paavo_directory, file_name)
def fetch_dataframe(url, query={"query": [], "response": {"format": "csv"}}):
"""Download a table from PXweb API to a DataFrame"""
response = requests.post(url, json=query, stream=True, allow_redirects=True)
if response.status_code == 200:
byte_data = io.BytesIO(response.content)
df = pd.read_csv(byte_data, sep=',', encoding='iso-8859-1')
print('Downloaded data from ' + url)
return df
else:
print('Could not download from ' + url)
print('HTTP/1.1 ' + str(response.status_code))
        time.sleep(0.2)
        return pd.DataFrame()
def paavo_data():
"""Download the whole paavo directory to a dictionary with names as keys and dataframes as values"""
data = {}
# Getting levels from paavo database
levels = []
response = requests.post('http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/')
response_texts = json.loads(response.text)
for response_text in response_texts:
levels.append(str(response_text['id']))
for level in levels:
response = requests.post(
'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/' + str(level))
response_texts = json.loads(response.text)
table_data = {}
for response_text in response_texts:
table_data[response_text['id']] = str(response_text['text']).split('. ')[-1].replace("'", "").replace(" ", "_")
for (id, name) in table_data.items():
url = paavo_url(level, id)
df = fetch_dataframe(url)
if not df.empty:
data[name] = df
time.sleep(1)
return data
def fetch_paavo_density_and_area(density_file_destination, area_file_destination):
def clean_df(df):
# Drop Finland row
df.drop(index=0, inplace=True)
# Extract postal code
df.rename(columns={df.columns[0]: 'Postal code'}, inplace=True)
df['Postal code'] = df['Postal code'].apply(lambda x: x.split(' ')[0])
# Replace '.' with 0 and set Postal code as index
df.replace({'.': 0}, inplace=True)
df.set_index('Postal code', inplace=True)
# Change data type of all columns to integer
for column in df.columns:
df[column] = df[column].astype(int)
return df
url_2013 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2015/paavo_9_koko_2015.px/'
url_2014 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2016/paavo_9_koko_2016.px/'
url_2015 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2017/paavo_9_koko_2017.px/'
url_2016 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2018/paavo_9_koko_2018.px/'
url_2017 = 'http://pxnet2.stat.fi/PXWeb/api/v1/en/Postinumeroalueittainen_avoin_tieto/2019/paavo_9_koko_2019.px/'
dfs = {}
years = np.array([[2014], [2015], [2016], [2017]])
# Download and clean each dataframe
dfs[2013] = clean_df(fetch_dataframe(url_2013, paavo_queries.surface_population_query))
dfs[2014] = clean_df(fetch_dataframe(url_2014, paavo_queries.surface_population_query))
dfs[2015] = clean_df(fetch_dataframe(url_2015, paavo_queries.surface_population_query))
dfs[2016] = clean_df(fetch_dataframe(url_2016, paavo_queries.surface_population_query))
dfs[2017] = clean_df(fetch_dataframe(url_2017, paavo_queries.surface_population_query))
# Change column labels
for (year, df) in dfs.items():
pop_str = 'Population (' + str(year) +')'
area_str = 'Surface area (' + str(year) + ')'
density_str = 'Density (' + str(year) +')'
if year > 2013:
df.rename(columns={df.columns[0]: area_str, df.columns[1]: pop_str}, inplace=True)
df.insert(2, density_str, df[pop_str] / df[area_str])
            df.replace({0.0: np.nan}, inplace=True)
else:
df.rename(columns={df.columns[0]: pop_str}, inplace=True)
            df.replace({0.0: np.nan}, inplace=True)
# Merge dataframe using Postal code index, manually adding density and surface area columns for 2013
main_table = dfs[2014]
main_table = main_table.merge(dfs[2013], how='left', on='Postal code')
main_table = main_table.merge(dfs[2015], how='left', on='Postal code')
main_table = main_table.merge(dfs[2016], how='left', on='Postal code')
main_table = main_table.merge(dfs[2017], how='left', on='Postal code')
main_table.insert(0, 'Density (2013)', np.nan)
main_table.insert(0, 'Surface area (2013)', np.nan)
densities = main_table[['Density (2014)', 'Density (2015)', 'Density (2016)', 'Density (2017)']]
# Linear regression on density. If density is negative, drop the latest density and retry. If there is only 1 usable density, copy it to the 2013 density
for index, row in densities.iterrows():
y = row.to_numpy()
valid_index = np.where(y >= 0)
valid_years = years[valid_index]
y = y[valid_index]
density_prediction = -1.0
while len(y) > 1 and density_prediction < 0:
reg = LinearRegression().fit(valid_years, y)
density_prediction = reg.predict([[2013]])
if density_prediction < 0:
y = y[:-1]
valid_years = valid_years[:-1]
if len(y) > 1:
main_table.at[index, 'Density (2013)'] = density_prediction
        elif len(y) == 1:
main_table.at[index, 'Density (2013)'] = y[0]
else:
continue
# Calculate surface area using density and population
for index, row in main_table.iterrows():
        if np.isnan(row['Population (2013)']):
continue
elif row['Population (2013)'] > 0 and row['Density (2013)'] > 0:
main_table.at[index, 'Surface area (2013)'] = round(row['Population (2013)']/row['Density (2013)'])
elif row['Population (2013)'] == 0 and row['Density (2013)'] == 0:
main_table.at[index, 'Surface area (2013)'] = row['Surface area (2014)']
main_table = main_table.fillna(0)
# Results
densities = main_table[['Density (2013)', 'Density (2014)', 'Density (2015)', 'Density (2016)', 'Density (2017)']]
areas = main_table[['Surface area (2013)', 'Surface area (2014)', 'Surface area (2015)', 'Surface area (2016)', 'Surface area (2017)']]
# Export to tsv files
densities.to_csv(density_file_destination, sep='\t')
areas.to_csv(area_file_destination, sep='\t')
def fetch_paavo_housing(destination_directory, postal_code_file, density_file):
def postal_standardize(df):
df= df.astype({'Postal code': str})
for i in list(df.index):
df.at[i, 'Postal code'] = '0' * (5-len(df.at[i,'Postal code']))+ df.at[i, 'Postal code']
return df
def postal_merge(left, right):
return left.merge(right, how='left', on='Postal code')
def get_mean_simple(df, n):
"""Calculate housing prices for groups of postal codes with the same first 6-n digits"""
df_n = pd.DataFrame(df['Postal code'].apply(lambda x: x[:(1 - n)]))
df_n.rename(columns={df_n.columns[0]: 'Postal code'}, inplace=True)
df_n = df_n.join(df[['Total value', 'Number']].copy())
df_n = df_n.groupby("Postal code", as_index=False).agg("sum")
df_n['Mean'] = df_n['Total value'] / df_n['Number']
df_n.drop(['Total value', 'Number'], axis=1, inplace=True)
# df_n.set_index('Postal code', inplace=True)
return df_n
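    # For example, with n=3 the postal code '00100' is grouped under '001'
    # (its first 6 - 3 = 3 digits), and 'Mean' becomes the value-weighted
    # average price over all codes sharing that prefix.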
def impute_simple(df, df_n):
"""Impute using the results above"""
df_ni = df_n.set_index('Postal code')
for code in list(df_n['Postal code']):
df_rows = np.array(df[df['Postal code'].str.startswith(code)].index)
for i in df_rows:
if df.at[i, 'Mean'] == 0 or np.isnan(df.at[i, 'Mean']):
df.at[i, 'Mean'] = df_ni.at[code, 'Mean']
return df
def impute_with_density(df, postal_df):
"""Impute with respect to density using a linear model"""
def postal_truncate(n):
df_n = postal_df.copy()
df_n['Postal code'] = df_n['Postal code'].apply(lambda x: x[:(1-n)])
df_n.drop_duplicates(subset='Postal code', inplace=True)
return df_n
def impute_price(df_, n):
truncated_postal = postal_truncate(n)
for code in truncated_postal['Postal code']:
sub_df = df_[df_['Postal code'].str.startswith(code)]
good_df = sub_df[sub_df['Mean'] != 0]
bad_df = sub_df[sub_df['Mean'] == 0]
if len(good_df.index) >= 7:
good_df = good_df.nsmallest(15, 'Mean')
X = good_df['Density']
y = good_df['Mean']
X = sm.add_constant(X.values)
model = sm.OLS(y, X).fit()
for i in bad_df.index:
if df_.at[i, 'Mean'] <= 0 or np.isnan(df_.at[i, 'Mean']):
df_.at[i, 'Mean'] = int(model.predict([1, df_.at[i, 'Density']])[0])
return df_
for i in range(3,6):
df = impute_price(df, i)
return df
main_table = postal_standardize(pd.read_csv(postal_code_file, sep='\t'))
density = postal_standardize(pd.read_csv(density_file, sep='\t'))
density = density.fillna(0)
postal_code = main_table.copy()
year_list = list(range(2005, 2018))
base_query = paavo_queries.ts_housing_query['query']
for year in year_list:
for quarter in range(5):
# Construct the json query
new_query = [{"code": "Vuosi", "selection": {"filter": "item", "values": [str(year)]}}, {"code": "Neljännes", "selection": {"filter": "item", "values": [str(quarter)]}}] + base_query
quarter_query = {"query": new_query, "response": {"format": "csv"}}
if quarter == 0:
mean_label = 'Housing price (' + str(year) + ')'
else:
mean_label = str(year) + 'Q' +str(quarter)
# Get the data table for the quarter
quarter_frame = postal_standardize(fetch_dataframe(paavo_queries.housing_url, query= quarter_query))
# Leave only Postal code and house price
quarter_frame = quarter_frame[['Postal code', 'Mean', 'Number']]
# Replace missing value '.' with '0'
quarter_frame.replace({'.': '0'}, inplace=True)
# Change mean to housing price and convert to float, number to Int
quarter_frame['Mean'] = quarter_frame['Mean'].astype(int)
quarter_frame['Number'] = quarter_frame['Number'].astype(int)
# Calculate the total housing value for each row
quarter_frame['Total value'] = quarter_frame['Mean'] * quarter_frame['Number']
# Get the complete postal code
quarter_frame = postal_merge(postal_code, quarter_frame)
# Change the numbers of houses where the prices are hidden to 0 so that the calculation of the group mean is not affected
for code in list(quarter_frame.index):
if quarter_frame.at[code, 'Mean'] == 0 or np.isnan(quarter_frame.at[code, 'Mean']):
quarter_frame.at[code, 'Number'] = 0
if year < 2013:
# Calculating the average housing price of postal codes with the same first 3, 2, 1 digits
quarter_frame_3 = get_mean_simple(quarter_frame, 3)
quarter_frame_4 = get_mean_simple(quarter_frame, 4)
quarter_frame_5 = get_mean_simple(quarter_frame, 5)
# Fill df_4 empty values with that of df_5 and df_3 with that of df_4
quarter_frame_4 = impute_simple(quarter_frame_4, quarter_frame_5)
quarter_frame_3 = impute_simple(quarter_frame_3, quarter_frame_4)
# Round mean values and fill empty cells with zero, though there should not be any at this point
quarter_frame_3.fillna(0, inplace=True)
quarter_frame_3['Mean'] = quarter_frame_3['Mean'].astype(int)
                # Fill the
    # relations of the form
# sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
# preserved by the same argument as before.
def parse_hints(hints):
"""Split hints into (n, funcs, iterables, gens)."""
n = 1
funcs, iterables, gens = [], [], []
for e in hints:
if isinstance(e, (SYMPY_INTS, Integer)):
n = e
elif isinstance(e, FunctionClass):
funcs.append(e)
elif iterable(e):
iterables.append((e[0], e[1:]))
# XXX sin(x+2y)?
# Note: we go through polys so e.g.
# sin(-x) -> -sin(x) -> sin(x)
gens.extend(parallel_poly_from_expr(
[e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens)
else:
gens.append(e)
return n, funcs, iterables, gens
def build_ideal(x, terms):
"""
Build generators for our ideal. Terms is an iterable with elements of
the form (fn, coeff), indicating that we have a generator fn(coeff*x).
If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
sin(n*x) and cos(n*x) are guaranteed.
"""
I = []
y = Dummy('y')
for fn, coeff in terms:
for c, s, t, rel in (
[cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
[cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]):
if coeff == 1 and fn in [c, s]:
I.append(rel)
elif fn == t:
I.append(t(coeff*x)*c(coeff*x) - s(coeff*x))
elif fn in [c, s]:
cn = fn(coeff*y).expand(trig=True).subs(y, x)
I.append(fn(coeff*x) - cn)
return list(set(I))
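    # For instance, for terms [(sin, 1), (cos, 1), (sin, 2)] in the variable x,
    # build_ideal returns relations such as
    #     cos(x)**2 + sin(x)**2 - 1        (Pythagorean identity for the base angle)
    #     sin(2*x) - 2*sin(x)*cos(x)       (multiple-angle expansion)
    # which generate the ideal the Groebner basis is later computed over.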
def analyse_gens(gens, hints):
"""
Analyse the generators ``gens``, using the hints ``hints``.
The meaning of ``hints`` is described in the main docstring.
Return a new list of generators, and also the ideal we should
work with.
"""
# First parse the hints
n, funcs, iterables, extragens = parse_hints(hints)
debug('n=%s' % n, 'funcs:', funcs, 'iterables:',
iterables, 'extragens:', extragens)
# We just add the extragens to gens and analyse them as before
gens = list(gens)
gens.extend(extragens)
# remove duplicates
funcs = list(set(funcs))
iterables = list(set(iterables))
gens = list(set(gens))
# all the functions we can do anything with
allfuncs = {sin, cos, tan, sinh, cosh, tanh}
# sin(3*x) -> ((3, x), sin)
trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
if g.func in allfuncs]
# Our list of new generators - start with anything that we cannot
# work with (i.e. is not a trigonometric term)
freegens = [g for g in gens if g.func not in allfuncs]
newgens = []
trigdict = {}
for (coeff, var), fn in trigterms:
trigdict.setdefault(var, []).append((coeff, fn))
res = [] # the ideal
for key, val in trigdict.items():
        # We have now assembled a dictionary. Its keys are common
# arguments in trigonometric expressions, and values are lists of
# pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we
# need to deal with fn(coeff*x0). We take the rational gcd of the
# coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
# all other arguments are integral multiples thereof.
# We will build an ideal which works with sin(x), cos(x).
# If hint tan is provided, also work with tan(x). Moreover, if
# n > 1, also work with sin(k*x) for k <= n, and similarly for cos
# (and tan if the hint is provided). Finally, any generators which
# the ideal does not work with but we need to accommodate (either
# because it was in expr or because it was provided as a hint)
# we also build into the ideal.
# This selection process is expressed in the list ``terms``.
# build_ideal then generates the actual relations in our ideal,
# from this list.
fns = [x[1] for x in val]
val = [x[0] for x in val]
gcd = reduce(igcd, val)
terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)]
fs = set(funcs + fns)
for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
if any(x in fs for x in (c, s, t)):
fs.add(c)
fs.add(s)
for fn in fs:
for k in range(1, n + 1):
terms.append((fn, k))
extra = []
for fn, v in terms:
if fn == tan:
extra.append((sin, v))
extra.append((cos, v))
if fn in [sin, cos] and tan in fs:
extra.append((tan, v))
if fn == tanh:
extra.append((sinh, v))
extra.append((cosh, v))
if fn in [sinh, cosh] and tanh in fs:
extra.append((tanh, v))
terms.extend(extra)
x = gcd*Mul(*key)
r = build_ideal(x, terms)
res.extend(r)
newgens.extend(set(fn(v*x) for fn, v in terms))
# Add generators for compound expressions from iterables
for fn, args in iterables:
if fn == tan:
# Tan expressions are recovered from sin and cos.
iterables.extend([(sin, args), (cos, args)])
elif fn == tanh:
                # Tanh expressions are recovered from sinh and cosh.
iterables.extend([(sinh, args), (cosh, args)])
else:
dummys = symbols('d:%i' % len(args), cls=Dummy)
expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args)))
res.append(fn(Add(*args)) - expr)
if myI in gens:
res.append(myI**2 + 1)
freegens.remove(myI)
newgens.append(myI)
return res, freegens, newgens
myI = Dummy('I')
expr = expr.subs(S.ImaginaryUnit, myI)
subs = [(myI, S.ImaginaryUnit)]
num, denom = cancel(expr).as_numer_denom()
try:
(pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
except PolificationFailed:
return expr
debug('initial gens:', opt.gens)
ideal, freegens, gens = analyse_gens(opt.gens, hints)
debug('ideal:', ideal)
debug('new gens:', gens, " -- len", len(gens))
    debug('free gens:', freegens, " -- len", len(freegens))
# NOTE we force the domain to be ZZ to stop polys from injecting generators
# (which is usually a sign of a bug in the way we build the ideal)
if not gens:
return expr
G = groebner(ideal, order=order, gens=gens, domain=ZZ)
debug('groebner basis:', list(G), " -- len", len(G))
# If our fraction is a polynomial in the free generators, simplify all
# coefficients separately:
from sympy.simplify.ratsimp import ratsimpmodprime
if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
num = Poly(num, gens=gens+freegens).eject(*gens)
res = []
for monom, coeff in num.terms():
ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
# We compute the transitive closure of all generators that can
# be reached from our generators through relations in the ideal.
changed = True
while changed:
changed = False
for p in ideal:
p = Poly(p)
if not ourgens.issuperset(p.gens) and \
not p.has_only_gens(*set(p.gens).difference(ourgens)):
changed = True
ourgens.update(p.exclude().gens)
# NOTE preserve order!
realgens = [x for x in gens if x in ourgens]
# The generators of the ideal have now been (implicitly) split
# into two groups: those involving ourgens and those that don't.
# Since we took the transitive closure above, these two groups
            # live in subrings generated by a *disjoint* set of variables.
# Any sensible groebner basis algorithm will preserve this disjoint
# structure (i.e. the elements of the groebner basis can be split
            # similarly), and the two subsets of the groebner basis then
# form groebner bases by themselves. (For the smaller generating
# sets, of course.)
ourG = [g.as_expr() for g in G.polys if
g.has_only_gens(*ourgens.intersection(g.gens))]
res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, ourG, order=order,
gens=realgens, quick=quick, domain=ZZ,
polynomial=polynomial).subs(subs))
return Add(*res)
# NOTE The following is simpler and has less assumptions on the
# groebner basis algorithm. If the above turns out to be broken,
# use this.
return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, list(G), order=order,
gens=gens, quick=quick, domain=ZZ)
for monom, coeff in num.terms()])
else:
return ratsimpmodprime(
expr, list(G), order=order, gens=freegens+gens,
quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
_trigs = (TrigonometricFunction, HyperbolicFunction)
def trigsimp(expr, **opts):
"""
reduces expression by using known trig identities
Notes
=====
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', and 'fu'. If 'matching', simplify the
expression recursively by targeting common patterns. If 'groebner', apply
an experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring).
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x, y
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
    Simplification occurs wherever trigonometric functions are located.
accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"remove_user_from_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userGroupMembershipId": user_group_membership_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
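    # Hedged usage sketch for the retry handling above (the client class and
    # oci.retry.DEFAULT_RETRY_STRATEGY are the public OCI SDK API; the OCID is
    # a placeholder):
    #
    #   import oci
    #   client = oci.identity.IdentityClient(config)
    #   client.remove_user_from_group(
    #       "ocid1.groupmembership.oc1..exampleuniqueID",
    #       retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
    #
    # Passing retry_strategy per call overrides any client-level strategy, as
    # implemented in the method above.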
def reset_idp_scim_client(self, identity_provider_id, **kwargs):
"""
Resets the OAuth2 client credentials for the SCIM client associated with this identity provider.
:param str identity_provider_id: (required)
The OCID of the identity provider.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.ScimClientCredentials`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/identityProviders/{identityProviderId}/actions/resetScimClient"
method = "POST"
expected_kwargs = ["retry_strategy"]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"reset_idp_scim_client got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"identityProviderId": identity_provider_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScimClientCredentials")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScimClientCredentials")
def update_auth_token(self, user_id, auth_token_id, update_auth_token_details, **kwargs):
"""
Updates the specified auth token's description.
:param str user_id: (required)
The OCID of the user.
:param str auth_token_id: (required)
The OCID of the auth token.
:param UpdateAuthTokenDetails update_auth_token_details: (required)
Request object for updating an auth token.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.AuthToken`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/users/{userId}/authTokens/{authTokenId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_auth_token got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"userId": user_id,
"authTokenId": auth_token_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_auth_token_details,
response_type="AuthToken")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_auth_token_details,
response_type="AuthToken")
def update_authentication_policy(self, compartment_id, update_authentication_policy_details, **kwargs):
"""
Updates authentication policy for the specified tenancy
:param str compartment_id: (required)
The OCID of the compartment.
:param UpdateAuthenticationPolicyDetails update_authentication_policy_details: (required)
Request object for updating the authentication policy.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.AuthenticationPolicy`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/authenticationPolicies/{compartmentId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_authentication_policy got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_authentication_policy_details,
response_type="AuthenticationPolicy")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_authentication_policy_details,
response_type="AuthenticationPolicy")
def update_compartment(self, compartment_id, update_compartment_details, **kwargs):
"""
Updates the specified compartment's description or name. You can't update the root compartment.
:param str compartment_id: (required)
The OCID of the compartment.
:param UpdateCompartmentDetails update_compartment_details: (required)
Request object for updating a compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.Compartment`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/compartments/{compartmentId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
# mcmasterg/symbiflow-arch-defs: utils/lib/rr_graph/channel.py
#!/usr/bin/env python3
import enum
import io
from collections import namedtuple
from . import Pos
from . import Size
from . import static_property
from ..asserts import assert_type
from ..asserts import assert_len_eq
class ChannelNotStraight(TypeError):
pass
_Channel = namedtuple("Channel", ("start", "end", "idx"))
class Channel(_Channel):
class Type(enum.Enum):
X = 'CHANX'
Y = 'CHANY'
def __repr__(self):
return 'Channel.Type.'+self.name
class Direction(enum.Enum):
INC = 'INC_DIR'
DEC = 'DEC_DIR'
def __repr__(self):
return 'Channel.Direction.'+self.name
def __new__(cls, start, end, idx=None, id_override=None):
if not isinstance(start, Pos):
start = Pos(*start)
if not isinstance(end, Pos):
end = Pos(*end)
if start.x != end.x and start.y != end.y:
raise ChannelNotStraight(
"Channel not straight! {}->{}".format(start, end))
if idx is not None:
assert_type(idx, int)
obj = _Channel.__new__(cls, start, end, idx)
obj.id_override = id_override
return obj
@static_property
def type(self):
"""Type of the channel.
Returns: Channel.Type
>>> Channel((0, 0), (10, 0)).type
Channel.Type.Y
>>> Channel((0, 0), (0, 10)).type
Channel.Type.X
>>> Channel((1, 1), (1, 1)).type
Channel.Type.X
"""
if self.start.x == self.end.x:
return Channel.Type.X
elif self.start.y == self.end.y:
return Channel.Type.Y
else:
assert False
@static_property
def start0(self):
"""The non-constant start coordinate.
>>> Channel((0, 0), (10, 0)).start0
0
>>> Channel((0, 0), (0, 10)).start0
0
>>> Channel((1, 1), (1, 1)).start0
1
>>> Channel((10, 0), (0, 0)).start0
10
>>> Channel((0, 10), (0, 0)).start0
10
"""
if self.type == Channel.Type.Y:
return self.start.x
elif self.type == Channel.Type.X:
return self.start.y
else:
assert False
@static_property
def end0(self):
"""The non-constant start coordinate.
>>> Channel((0, 0), (10, 0)).end0
10
>>> Channel((0, 0), (0, 10)).end0
0
>>> Channel((1, 1), (1, 1)).end0
1
>>> Channel((10, 0), (0, 0)).end0
0
>>> Channel((0, 10), (0, 0)).end0
0
"""
if self.type == Channel.Type.Y:
return self.end.x
elif self.type == Channel.Type.X:
return self.end.y
else:
assert False
@static_property
def common(self):
"""The common coordinate value.
>>> Channel((0, 0), (10, 0)).common
0
>>> Channel((0, 0), (0, 10)).common
0
>>> Channel((1, 1), (1, 1)).common
1
>>> Channel((10, 0), (0, 0)).common
0
>>> Channel((0, 10), (0, 0)).common
0
>>> Channel((4, 10), (4, 0)).common
4
"""
if self.type == Channel.Type.Y:
assert self.start.y == self.end.y
return self.start.y
elif self.type == Channel.Type.X:
assert self.start.x == self.end.x
return self.start.x
else:
assert False
@static_property
def direction(self):
"""Direction the channel runs.
Returns: Channel.Direction
>>> Channel((0, 0), (10, 0)).direction
Channel.Direction.INC
>>> Channel((0, 0), (0, 10)).direction
Channel.Direction.INC
>>> Channel((1, 1), (1, 1)).direction
Channel.Direction.INC
>>> Channel((10, 0), (0, 0)).direction
Channel.Direction.DEC
>>> Channel((0, 10), (0, 0)).direction
Channel.Direction.DEC
"""
if self.end0 < self.start0:
return Channel.Direction.DEC
else:
return Channel.Direction.INC
@static_property
def length(self):
"""Length of the channel.
>>> Channel((0, 0), (10, 0)).length
10
>>> Channel((0, 0), (0, 10)).length
10
>>> Channel((1, 1), (1, 1)).length
0
>>> Channel((10, 0), (0, 0)).length
10
>>> Channel((0, 10), (0, 0)).length
10
"""
return abs(self.end0 - self.start0)
def update_idx(self, idx):
"""Create a new channel with the same start/end but new index value.
>>> s = (1, 4)
>>> e = (1, 8)
>>> c1 = Channel(s, e, 0)
>>> c2 = c1.update_idx(2)
>>> assert c1.start == c2.start
>>> assert c1.end == c2.end
>>> c1.idx
0
>>> c2.idx
2
"""
return self.__class__(self.start, self.end, idx, id_override=self.id_override)
def __repr__(self):
"""
>>> repr(Channel((0, 0), (10, 0)))
'C((0,0), (10,0))'
>>> repr(Channel((0, 0), (0, 10)))
'C((0,0), (0,10))'
>>> repr(Channel((1, 2), (3, 2), 5))
'C((1,2), (3,2), 5)'
>>> repr(Channel((1, 2), (3, 2), None, "ABC"))
'C(ABC)'
>>> repr(Channel((1, 2), (3, 2), 5, "ABC"))
'C(ABC,5)'
"""
if self.id_override:
idx_str = ""
            if self.idx is not None:
                idx_str = ",{}".format(self.idx)
            return "C({}{})".format(self.id_override, idx_str)
        idx_str = ""
        if self.idx is not None:
idx_str = ", {}".format(self.idx)
return "C(({},{}), ({},{}){})".format(
self.start.x, self.start.y, self.end.x, self.end.y, idx_str)
def __str__(self):
"""
>>> str(Channel((0, 0), (10, 0)))
'CHANY 0,0->10,0'
>>> str(Channel((0, 0), (0, 10)))
'CHANX 0,0->0,10'
>>> str(Channel((1, 2), (3, 2), 5))
'CHANY 1,2->3,2 @5'
>>> str(Channel((1, 2), (3, 2), None, "ABC"))
'ABC'
>>> str(Channel((1, 2), (3, 2), 5, "ABC"))
'ABC@5'
"""
idx_str = ""
        if self.idx is not None:
idx_str = " @{}".format(self.idx)
if self.id_override:
return "{}{}".format(self.id_override, idx_str[1:])
return "{} {},{}->{},{}{}".format(
self.type.value, self.start.x, self.start.y, self.end.x, self.end.y, idx_str)
# Nice short alias..
C = Channel
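# Hedged sketch: a tiny self-contained exercise of the Channel API defined
# above; the coordinate values are arbitrary and the helper is illustration
# only, not part of the original module.
def _example_channel_usage():
    ch = Channel((0, 5), (3, 5), idx=0)
    assert ch.type == Channel.Type.Y          # constant y coordinate -> CHANY
    assert ch.direction == Channel.Direction.INC
    assert ch.length == 3
    return ch.update_idx(2)                   # same span, new track index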
class ChannelGrid(dict):
def __init__(self, size, chan_type):
self.chan_type = chan_type
self.size = Size(*size)
for x in range(0, self.x):
for y in range(0, self.y):
self[Pos(x,y)] = []
@property
def x(self):
return self.size.x
@property
def y(self):
return self.size.y
def column(self, x):
column = []
for y in range(0, self.y):
column.append(self[Pos(x, y)])
return column
def row(self, y):
row = []
for x in range(0, self.x):
row.append(self[Pos(x, y)])
return row
def add_channel(self, ch):
"""
>>> g = ChannelGrid((10, 10), Channel.Type.Y)
>>> # Adding the first channel
>>> g.add_channel(Channel((0, 5), (3, 5), None, "A"))
C(A,0)
>>> g[(0,5)]
[C(A,0)]
>>> g[(1,5)]
[C(A,0)]
>>> g[(3,5)]
[C(A,0)]
>>> g[(4,5)]
[None]
>>> # Adding second non-overlapping second channel
>>> g.add_channel(Channel((4, 5), (6, 5), None, "B"))
C(B,0)
>>> g[(3,5)]
[C(A,0)]
>>> g[(4,5)]
[C(B,0)]
>>> g[(6,5)]
[C(B,0)]
>>> g[(7,5)]
[None]
>>> # Adding third channel which overlaps with second channel
>>> g.add_channel(Channel((4, 5), (6, 5), None, "C"))
C(C,1)
>>> g[(3,5)]
[C(A,0), None]
>>> g[(4,5)]
[C(B,0), C(C,1)]
>>> g[(6,5)]
[C(B,0), C(C,1)]
>>> # Adding a channel which overlaps, but is a row over
>>> g.add_channel(Channel((4, 6), (6, 6), None, "D"))
C(D,0)
>>> g[(4,5)]
[C(B,0), C(C,1)]
>>> g[(4,6)]
[C(D,0)]
>>> # Adding fourth channel which overlaps both the first
>>> # and second+third channel
>>> g.add_channel(Channel((2, 5), (5, 5), None, "E"))
C(E,2)
>>> g[(1,5)]
[C(A,0), None, None]
>>> g[(2,5)]
[C(A,0), None, C(E,2)]
>>> g[(5,5)]
[C(B,0), C(C,1), C(E,2)]
>>> g[(6,5)]
[C(B,0), C(C,1), None]
>>> # This channel fits in the hole left by the last one.
>>> g.add_channel(Channel((0, 5), (2, 5), None, "F"))
C(F,1)
>>> g[(0,5)]
[C(A,0), C(F,1), None]
>>> g[(1,5)]
[C(A,0), C(F,1), None]
>>> g[(2,5)]
[C(A,0), C(F,1), C(E,2)]
>>> g[(3,5)]
[C(A,0), None, C(E,2)]
>>> # Add another channel which causes a hole
>>> g.add_channel(Channel((0, 5), (6, 5), None, "G"))
C(G,3)
>>> g[(0,5)]
[C(A,0), C(F,1), None, C(G,3)]
>>> g[(1,5)]
[C(A,0), C(F,1), None, C(G,3)]
>>> g[(2,5)]
[C(A,0), C(F,1), C(E,2), C(G,3)]
>>> g[(3,5)]
[C(A,0), None, C(E,2), C(G,3)]
>>> g[(4,5)]
[C(B,0), C(C,1), C(E,2), C(G,3)]
>>> g[(5,5)]
[C(B,0), C(C,1), C(E,2), C(G,3)]
>>> g[(6,5)]
[C(B,0), C(C,1), None, C(G,3)]
>>> g[(7,5)]
[None, None, None, None]
"""
        assert ch.idx is None
if ch.type != self.chan_type:
if ch.length != 0:
raise TypeError(
"Can only add channels of type {} which {} ({}) is not.".format(
self.chan_type, ch, ch.type))
else:
ch.type = self.chan_type
if ch.type == Channel.Type.X:
l = self.column(ch.common)
elif ch.type == Channel.Type.Y:
l = self.row(ch.common)
else:
assert False
assert_len_eq(l)
s = ch.start0
e = ch.end0
if ch.direction == Channel.Direction.DEC:
e, s = s, e
assert e >= s
assert s < len(l), (s, '<', len(l), l)
assert e < len(l), (e+1, '<', len(l), l)
        # Find the first idx at which this channel fits.
max_idx = 0
while True:
for p in l[s:e+1]:
while len(p) < max_idx+1:
p.append(None)
                if p[max_idx] is not None:
max_idx += 1
break
else:
break
# Make sure everything has the same length.
for p in l:
while len(p) < max_idx+1:
p.append(None)
assert_len_eq(l)
ch = ch.update_idx(max_idx)
assert ch.idx == max_idx
for p in l[s:e+1]:
p[ch.idx] = ch
return ch
def pretty_print(self):
"""
If type == Channel.Type.X
A--AC-C
B-----B
D--DE-E
F-----F
If type == Channel.Type.Y
AB DF
|| ||
|| ||
A| D|
C| E|
|| ||
CB EF
"""
def get_str(ch):
if not ch:
s = ""
elif ch.id_override:
s = ch.id_override
else:
s = str(ch)
return s
# Work out how many characters the largest label takes up.
s_maxlen = 1
for row in range(0, self.y):
for col in range(0, self.x):
for ch in self[(col,row)]:
s_maxlen = max(s_maxlen, len(get_str(ch)))
assert s_maxlen > 0, s_maxlen
s_maxlen += 3
if self.chan_type == Channel.Type.Y:
beg_fmt = "{:>%i}>" % (s_maxlen-1)
end_fmt = "->{:<%i}" % (s_maxlen-2)
mid_fmt = "-"*s_maxlen
elif self.chan_type == Channel.Type.X:
beg_fmt = "{:^%i}" % s_maxlen
end_fmt = beg_fmt
mid_fmt = beg_fmt.format("|")
else:
assert False
non_fmt = " "*s_maxlen
rows = []
for y in range(0, self.y):
cols = []
for x in range(0, self.x):
channels = [("|{: ^%i}" % (s_maxlen-1)).format(x)]
for ch in self[(x,y)]:
if not ch:
fmt = non_fmt
elif ch.start == ch.end:
s = get_str(ch)
channels.append("{} ".format("".join([
beg_fmt.format(s),
mid_fmt.format(s),
end_fmt.format(s),
])[:s_maxlen-1]))
with pytest.raises(RSENotFound):
mgr.get_rse_info(rse="TheOnethatshouldnotbehere", **self.vo)
def test_get_protocols_operations(self):
""" RSE (CLIENTS): get protocols for operations of rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_READ_WRITE_DELETE',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_WRITE_DELETE',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
# Protocol identifier include supported operations
for p in protocols:
self.client.add_protocol(protocol_rse, p)
ops = {'read': 1, 'write': 2, 'delete': 3}
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
for op in ops:
# resp = self.client.get_protocols(protocol_rse, operation=op, protocol_domain='lan')
p = mgr.select_protocol(rse_attr, op, domain='lan')
if op not in p['scheme'].lower():
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
raise Exception('Unexpected protocols returned for %s: %s' % (op, p))
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
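    # Hedged aside (not part of the test suite): the round trip exercised above
    # is add_rse -> add_protocol -> mgr.get_rse_info -> mgr.select_protocol;
    # select_protocol is expected to hand back a protocol whose 'domains' entry
    # marks the requested operation as supported in the requested domain, which
    # is why every scheme name in these fixtures encodes the operations it
    # supports.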
def test_get_protocols_defaults(self):
""" RSE (CLIENTS): get default protocols for operations of rse."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_READ',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1},
'wan': {'delete': 1}
},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_WRITE',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'write': 1},
'wan': {'read': 1}
},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'delete': 1},
'wan': {'write': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
for op in ['delete', 'read', 'write']:
# resp = self.client.get_protocols(protocol_rse, operation=op, default=True, protocol_domain='lan')
p = mgr.select_protocol(rse_attr, op, domain='lan')
print(p['scheme'])
print(op)
if op not in p['scheme'].lower():
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
raise Exception('Unexpected protocols returned for %s: %s' % (op, p))
for op in ['delete', 'read', 'write']:
# resp = self.client.get_protocols(protocol_rse, operation=op, default=True, protocol_domain='wan')
p = mgr.select_protocol(rse_attr, op, domain='wan')
if ((op == 'delete') and (p['port'] != 17)) or ((op == 'read') and (p['port'] != 42)) or ((op == 'write') and (p['port'] != 19)):
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
raise Exception('Unexpected protocols returned for %s: %s' % (op, p))
for p in protocols:
self.client.delete_protocols(protocol_rse, p['scheme'])
self.client.delete_rse(protocol_rse)
def test_get_protocols_nested_attributes(self):
""" RSE (CLIENTS): get nested extended_attributes."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_READ',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1},
'wan': {'delete': 1}
},
'extended_attributes': {'Some': 'value', 'more': {'value1': 1, 'value2': 0}}}]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
resp = mgr.get_rse_info(rse=protocol_rse, **self.vo)['protocols']
assert((not resp[0]['extended_attributes']['more']['value2']) and resp[0]['extended_attributes']['more']['value1'])
def test_get_protocols_operations_not_supported(self):
""" RSE (CLIENTS): get protocols for operations of rse (RSEOperationNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_WRITE_DELETE',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_WRITE_DELETE',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
# Protocol for read is undefined
for p in protocols:
self.client.add_protocol(protocol_rse, p)
try:
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
rse_attr['domain'] = ['lan']
with pytest.raises(exception.RSEProtocolNotSupported):
mgr.select_protocol(rse_attr, 'read')
finally:
self.client.delete_rse(protocol_rse)
def test_get_protocols_domain_not_exist(self):
""" RSE (CLIENTS): get protocols for operations of rse in not existing domain (RSEProtocolDomainNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}
# Protocol for read is undefined
self.client.add_protocol(protocol_rse, attributes)
try:
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
with pytest.raises(exception.RSEProtocolDomainNotSupported):
mgr.select_protocol(rse_attr, 'write', domain='FRIENDS')
finally:
self.client.delete_rse(protocol_rse)
def test_get_protocols_domain_not_supported(self):
""" RSE (CLIENTS): get protocols for operations of rse in unsupported domain (RSEOperationNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
# Protocol for read is undefined
for p in protocols:
self.client.add_protocol(protocol_rse, p)
try:
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
rse_attr['domain'] = ['wan']
with pytest.raises(exception.RSEProtocolNotSupported):
mgr.select_protocol(rse_attr, 'write')
finally:
self.client.delete_rse(protocol_rse)
def test_get_protocols_defaults_not_supported(self):
""" RSE (CLIENTS): get default protocols for operations of rse (RSEOperationNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK_WRITE_DELETE',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_WRITE_DELETE',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
# Protocol for read is undefined
for p in protocols:
self.client.add_protocol(protocol_rse, p)
try:
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
rse_attr['domain'] = ['lan']
with pytest.raises(exception.RSEProtocolNotSupported):
mgr.select_protocol(rse_attr, 'read')
finally:
self.client.delete_rse(protocol_rse)
# UPDATE PROTOCOLS
def test_update_protocols_port_exist(self):
""" RSE (CLIENTS): set new values for various protocol attributes."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'wan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 11,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'wan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'}]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
try:
with pytest.raises(exception.Duplicate):
self.client.update_protocols(protocol_rse, scheme='MOCK', hostname='localhost', port=17, data={'prefix': 'where/the/files/are', 'extended_attributes': 'Something else', 'port': '11'})
finally:
self.client.delete_rse(protocol_rse)
def test_update_protocols_various_attributes(self):
""" RSE (CLIENTS): set new values for various protocol attributes."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'}]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
self.client.update_protocols(protocol_rse, scheme='MOCK', hostname='localhost', port=17, data={'prefix': 'where/the/files/are', 'extended_attributes': 'Something else', 'port': '12'})
rse_attr = mgr.get_rse_info(rse=protocol_rse, **self.vo)
p = mgr.select_protocol(rse_attr, 'read', scheme='MOCK', domain='lan')
if p['prefix'] != 'where/the/files/are' and p['extended_attributes'] != 'Something else':
raise Exception('Update gave unexpected results: %s' % p)
self.client.delete_protocols(protocol_rse, 'MOCK')
self.client.delete_rse(protocol_rse)
def test_swap_protocol(self):
""" RSE (CLIENTS): swaps the priority of two protocols by scheme. """
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCKA',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCKB',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 2,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCKC',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 3,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
self.client.swap_protocols(protocol_rse, 'lan', 'read', 'MOCKA', 'MOCKC')
prots = self.client.get_protocols(protocol_rse)
for p in prots:
if p['scheme'] == 'MOCKA':
if p['domains']['lan']['read'] != 3:
print('MOCKA with unexpected priority')
print(prots)
assert(False)
if p['scheme'] == 'MOCKC':
if p['domains']['lan']['read'] != 1:
print('MOCKC with unexpected priority')
print(prots)
assert(False)
assert(True)
def test_update_protocols_rse_not_found(self):
""" RSE (CLIENTS): update all protocols with specific identifier of rse (RSENotFound)."""
with pytest.raises(RSENotFound):
self.client.update_protocols('The One that shouldn\'t be here', scheme='MOCK_Fail', hostname='localhost', port=17, data={'prefix': 'where/the/files/are'})
def test_update_protocols_not_supported(self):
""" RSE (CLIENTS): update all protocols with specific identifier of rse (RSEProtocolNotSupported)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocols = [{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK',
'hostname': 'localhost',
'port': 42,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'},
{'scheme': 'MOCK_DELETE',
'hostname': 'localhost',
'port': 19,
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {
'lan': {'read': 0,
'write': 0,
'delete': 1}},
'extended_attributes': 'TheOneWithAllTheRest'}, ]
for p in protocols:
self.client.add_protocol(protocol_rse, p)
try:
with pytest.raises(exception.RSEProtocolNotSupported):
self.client.update_protocols(protocol_rse, scheme='MOCK_UNDEFINED', hostname='localhost', port=17, data={'delete_lan': 1})
finally:
self.client.delete_rse(protocol_rse)
def test_update_protocols_invalid_value(self):
""" RSE (CLIENTS): update all protocol with invalid value (InvalidObject)."""
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
attributes = {'scheme': 'MOCK',
'hostname': 'localhost',
'port': 17,
'prefix': '/the/one/with/all/the/files',
# 'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
'domains': {'lan': {'read': 1,
'write': 1,
'delete': 0}},
'extended_attributes': 'TheOneWithAllTheRest'}
try:
with pytest.raises(exception.InvalidObject):
self.client.add_protocol(protocol_rse, attributes)
with pytest.raises(exception.RSEProtocolNotSupported):
self.client.update_protocols(protocol_rse, scheme=attributes['scheme'], hostname=attributes['hostname'], port=attributes['port'], data={'impl': None})
finally:
self.client.delete_rse(protocol_rse)
def test_update_protocol_wrong_priority(self):
""" RSE (CLIENTS): Add a protocol with an invalid priority for ranking. """
protocol_rse = rse_name_generator()
self.client.add_rse(protocol_rse)
protocol_ports = [17, 29, 42]
for i in range(3):
self.client.add_protocol(protocol_rse,
{'hostname': 'localhost',
'scheme': 'MOCK',
'port': protocol_ports[i],
'prefix': '/the/one/with/all/the/files',
'impl': 'rucio.rse.protocols.SomeProtocol.SomeImplementation',
                                       'domains':
]
[0. 2.5 5. ]
[0. 2.5 5. ]]
>>> print(north)
[[ 0. 0. 0. ]
[ 2.5 2.5 2.5]
[ 5. 5. 5. ]
[ 7.5 7.5 7.5]
[10. 10. 10. ]]
The spacing can be different for northing and easting, respectively:
>>> east, north = grid_coordinates(region=(-5, 1, 0, 10), spacing=(2.5, 1))
>>> print(east.shape, north.shape)
(5, 7) (5, 7)
>>> print(east)
[[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]]
>>> print(north)
[[ 0. 0. 0. 0. 0. 0. 0. ]
[ 2.5 2.5 2.5 2.5 2.5 2.5 2.5]
[ 5. 5. 5. 5. 5. 5. 5. ]
[ 7.5 7.5 7.5 7.5 7.5 7.5 7.5]
[10. 10. 10. 10. 10. 10. 10. ]]
If the region can't be divided into the desired spacing, the spacing will
be adjusted to conform to the region:
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.6)
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.5 0. ]
[-5. -2.5 0. ]
[-5. -2.5 0. ]]
>>> print(north)
[[0. 0. 0. ]
[2.5 2.5 2.5]
[5. 5. 5. ]]
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.4)
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.5 0. ]
[-5. -2.5 0. ]
[-5. -2.5 0. ]]
>>> print(north)
[[0. 0. 0. ]
[2.5 2.5 2.5]
[5. 5. 5. ]]
You can choose to adjust the East and North boundaries of the region
instead:
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.6,
... adjust='region')
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.4 0.2]
[-5. -2.4 0.2]
[-5. -2.4 0.2]]
>>> print(north)
[[0. 0. 0. ]
[2.6 2.6 2.6]
[5.2 5.2 5.2]]
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.4,
... adjust='region')
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.6 -0.2]
[-5. -2.6 -0.2]
[-5. -2.6 -0.2]]
>>> print(north)
[[0. 0. 0. ]
[2.4 2.4 2.4]
[4.8 4.8 4.8]]
We can optionally generate coordinates for the center of each grid pixel
instead of the corner (default):
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), spacing=2.5,
... pixel_register=True)
>>> # Raise the printing precision for this example
>>> np.set_printoptions(precision=2, suppress=True)
>>> # Notice that the shape is 1 less than when pixel_register=False
>>> print(east.shape, north.shape)
(4, 2) (4, 2)
>>> print(east)
[[1.25 3.75]
[1.25 3.75]
[1.25 3.75]
[1.25 3.75]]
>>> print(north)
[[1.25 1.25]
[3.75 3.75]
[6.25 6.25]
[8.75 8.75]]
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), shape=(4, 2),
... pixel_register=True)
>>> print(east)
[[1.25 3.75]
[1.25 3.75]
[1.25 3.75]
[1.25 3.75]]
>>> print(north)
[[1.25 1.25]
[3.75 3.75]
[6.25 6.25]
[8.75 8.75]]
Generate arrays for other coordinates that have a constant value:
>>> east, north, height = grid_coordinates(
... region=(0, 5, 0, 10), spacing=2.5, extra_coords=57
... )
>>> print(east.shape, north.shape, height.shape)
(5, 3) (5, 3) (5, 3)
>>> print(height)
[[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]]
>>> east, north, height, time = grid_coordinates(
... region=(0, 5, 0, 10), spacing=2.5, extra_coords=[57, 0.1]
... )
>>> print(east.shape, north.shape, height.shape, time.shape)
(5, 3) (5, 3) (5, 3) (5, 3)
>>> print(height)
[[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]]
>>> print(time)
[[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]]
See also
--------
scatter_points : Generate the coordinates for a random scatter of points
profile_coordinates : Coordinates for a profile between two points
"""
check_region(region)
if shape is not None and spacing is not None:
raise ValueError("Both grid shape and spacing provided. Only one is allowed.")
if shape is None and spacing is None:
raise ValueError("Either a grid shape or a spacing must be provided.")
if spacing is not None:
shape, region = spacing_to_shape(region, spacing, adjust)
elif pixel_register:
# Starts by generating grid-line registered coordinates and shifting
# them to the center of the pixel. Need 1 more point if given a shape
# so that we can do that because we discard the last point when
# shifting the coordinates.
shape = tuple(i + 1 for i in shape)
east_lines = np.linspace(region[0], region[1], shape[1])
north_lines = np.linspace(region[2], region[3], shape[0])
if pixel_register:
east_lines = east_lines[:-1] + (east_lines[1] - east_lines[0]) / 2
north_lines = north_lines[:-1] + (north_lines[1] - north_lines[0]) / 2
coordinates = list(np.meshgrid(east_lines, north_lines))
if extra_coords is not None:
for value in np.atleast_1d(extra_coords):
coordinates.append(np.ones_like(coordinates[0]) * value)
return tuple(coordinates)
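# Hedged sketch: a compact end-to-end call of grid_coordinates as defined
# above, kept as a throwaway helper purely for illustration (the shapes mirror
# the spacing=2.5 doctest).
def _example_grid_usage():
    east, north = grid_coordinates(region=(0, 5, 0, 10), spacing=2.5)
    assert east.shape == north.shape == (5, 3)
    return east, north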
def spacing_to_shape(region, spacing, adjust):
"""
Convert the grid spacing to a grid shape.
Adjusts the spacing or the region if the desired spacing is not a multiple
of the grid dimensions.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
spacing : float, tuple = (s_north, s_east), or None
The grid spacing in the South-North and West-East directions,
respectively. A single value means that the spacing is equal in both
directions.
adjust : {'spacing', 'region'}
Whether to adjust the spacing or the region if required. Ignored if
*shape* is given instead of *spacing*. Defaults to adjusting the
spacing.
Returns
-------
shape, region : tuples
The calculated shape and region that best fits the desired spacing.
Spacing or region may be adjusted.
"""
if adjust not in ["spacing", "region"]:
raise ValueError(
"Invalid value for *adjust* '{}'. Should be 'spacing' or 'region'".format(
adjust
)
)
spacing = np.atleast_1d(spacing)
if len(spacing) == 1:
deast = dnorth = spacing[0]
elif len(spacing) == 2:
dnorth, deast = spacing
else:
raise ValueError(
"Only two values allowed for grid spacing: {}".format(str(spacing))
)
w, e, s, n = region
# Add 1 to get the number of nodes, not segments
nnorth = int(round((n - s) / dnorth)) + 1
neast = int(round((e - w) / deast)) + 1
if adjust == "region":
# The shape is the same but we adjust the region so that the spacing
# isn't altered when we do the linspace.
n = s + (nnorth - 1) * dnorth
e = w + (neast - 1) * deast
return (nnorth, neast), (w, e, s, n)
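# Hedged sketch: with adjust='region' the requested spacing is kept and the
# region is grown instead, matching the 0.2/5.2 overshoot in the doctest for
# spacing=2.6 above. Illustration only.
def _example_spacing_adjust():
    shape, region = spacing_to_shape((-5, 0, 0, 5), 2.6, adjust="region")
    assert shape == (3, 3)
    return region      # approximately (-5, 0.2, 0, 5.2)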
def shape_to_spacing(region, shape, pixel_register=False):
"""
Calculate the spacing of a grid given region and shape.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
shape : tuple = (n_north, n_east) or None
The number of points in the South-North and West-East directions,
respectively.
pixel_register : bool
If True, the coordinates will refer to the center of each grid pixel
instead of the grid lines. In practice, this means that there will be
one less element per dimension of the grid when compared to grid line
registered (only if given *spacing* and not *shape*). Default is False.
Returns
-------
spacing : tuple = (s_north, s_east)
The grid spacing in the South-North and West-East directions,
respectively.
Examples
--------
>>> spacing = shape_to_spacing([0, 10, -5, 1], (7, 11))
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 1.0
>>> spacing = shape_to_spacing([0, 10, -5, 1], (14, 11))
>>> print("{:.1f}, {:.1f}".format(*spacing))
0.5, 1.0
>>> spacing = shape_to_spacing([0, 10, -5, 1], (7, 21))
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 0.5
>>> spacing = shape_to_spacing(
... [-0.5, 10.5, -5.5, 1.5], (7, 11), pixel_register=True,
... )
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 1.0
>>> spacing = shape_to_spacing(
... [-0.25, 10.25, -5.5, 1.5], (7, 21), pixel_register=True,
... )
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 0.5
"""
spacing = []
for i, n_points in enumerate(reversed(shape)):
if not pixel_register:
n_points -= 1
spacing.append((region[2 * i + 1] - region[2 * i]) / n_points)
return tuple(reversed(spacing))
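# Hedged sketch: for grid-line registered grids, spacing_to_shape and
# shape_to_spacing round-trip each other (values mirror the doctests above).
def _example_spacing_roundtrip():
    region = [0, 10, -5, 1]
    shape, adjusted = spacing_to_shape(region, 1.0, adjust="spacing")
    assert shape == (7, 11) and adjusted == tuple(region)
    return shape_to_spacing(region, shape)    # -> (1.0, 1.0)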
def profile_coordinates(point1, point2, size, extra_coords=None):
"""
Coordinates for a profile along a straight line between two points.
Parameters
----------
point1 : tuple or list
        ``(easting, northing)`` coordinates of the first point.
"filename" + ".fits" + recipe.inputdir.upper() (including wildcards)
:param recipe: DrsRecipe instance
:param filename: string, the filename to test
:return cond: bool, if True file is valid, if False file is not valid
:return filelist: list of strings, list of files found
:return error: list of strings, if there was an error (cond = False)
and return_error=True, return a list of strings
describing the error
"""
output_files = []
# get drs parameters
params = recipe.drs_params
# get input directory
if directory is not None:
input_dir = str(directory)
else:
# noinspection PyProtectedMember
input_dir = recipe.get_input_dir()
# -------------------------------------------------------------------------
# Step 1: check "filename" as full link to file (including wildcards)
# -------------------------------------------------------------------------
# get glob list of files using glob
raw_files = np.sort(glob.glob(filename))
# debug output
if len(raw_files) == 0:
dargs = [argname, filename]
WLOG(params, 'debug', TextEntry('90-001-00003', args=dargs),
wrap=False)
# if we have file(s) then add them to output files
for raw_file in raw_files:
dargs = [argname, raw_file]
WLOG(params, 'debug', TextEntry('90-001-00004', args=dargs),
wrap=False)
output_files.append(raw_file)
# check if we are finished here
if len(output_files) > 0:
return True, output_files, []
# -------------------------------------------------------------------------
# Step 2: recipe.inputdir.upper() (including wildcards)
# -------------------------------------------------------------------------
# get glob list of files using glob
raw_files = np.sort(glob.glob(os.path.join(input_dir, filename)))
# debug output
if len(raw_files) == 0:
dargs = [argname, filename]
WLOG(params, 'debug', TextEntry('90-001-00003', args=dargs),
wrap=False)
# if we have file(s) then add them to output files
for raw_file in raw_files:
dargs = [argname, raw_file]
WLOG(params, 'debug', TextEntry('90-001-00005', args=dargs),
wrap=False)
output_files.append(raw_file)
# check if we are finished here
if len(output_files) > 0:
return True, output_files, []
# -------------------------------------------------------------------------
# Step 3: check "filename" as full link to file (including wildcards)
# + .fits
# -------------------------------------------------------------------------
# get glob list of files using glob
raw_files = np.sort(glob.glob(filename + '.fits'))
# debug output
if len(raw_files) == 0 and not filename.endswith('.fits'):
dargs = [argname, filename + '.fits']
WLOG(params, 'debug', TextEntry('90-001-00003', args=dargs),
wrap=False)
# if we have file(s) then add them to output files
for raw_file in raw_files:
dargs = [argname, raw_file]
WLOG(params, 'debug', TextEntry('90-001-00006', args=dargs),
wrap=False)
output_files.append(raw_file)
# check if we are finished here
if len(output_files) > 0:
return True, output_files, []
# -------------------------------------------------------------------------
# Step 4: recipe.inputdir.upper() (including wildcards)
# + .fits
# -------------------------------------------------------------------------
# get glob list of files using glob
raw_files = np.sort(glob.glob(os.path.join(input_dir, filename + '.fits')))
# debug output
if len(raw_files) == 0 and not filename.endswith('.fits'):
dargs = [argname, os.path.join(input_dir, filename + '.fits')]
WLOG(params, 'debug', TextEntry('90-001-00003', args=dargs),
wrap=False)
# if we have file(s) then add them to output files
for raw_file in raw_files:
dargs = [argname, raw_file]
WLOG(params, 'debug', TextEntry('90-001-00007', args=dargs),
wrap=False)
output_files.append(raw_file)
# -------------------------------------------------------------------------
# Deal with cases where we didn't find file
# -------------------------------------------------------------------------
eargs = [argname, filename, os.path.join(input_dir, filename)]
emsg = TextEntry('09-001-00005', args=eargs)
if not filename.endswith('.fits'):
fitsfile1 = filename + '.fits'
fitsfile2 = os.path.join(input_dir, fitsfile1)
emsg += TextEntry('\t\t"{0}"'.format(fitsfile1))
emsg += TextEntry('\t\t"{0}"'.format(fitsfile2))
# return False, no files and error messages
return False, None, emsg
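# Hedged sketch (not part of the DRS API): the four-step search above reduces
# to trying the raw name, the name under the input directory, and both again
# with '.fits' appended. The helper below restates that order with plain glob
# calls and relies on this module's existing glob/os/numpy imports.
def _example_search_order(filename, input_dir):
    patterns = [filename,
                os.path.join(input_dir, filename),
                filename + '.fits',
                os.path.join(input_dir, filename + '.fits')]
    for pattern in patterns:
        found = np.sort(glob.glob(pattern))
        if len(found) > 0:
            return list(found)
    return []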
def _check_if_directory(argname, files):
"""
Simple check to see if each file in files is a directory and then
checks if file in files is a file with os.path.isfile
:param argname:
:param files:
:return:
"""
# empty error entry
emsgs = TextEntry(None)
# loop around files
it = 0
for filename in files:
# set eargs
eargs = [argname, filename]
# check if directory
if os.path.isdir(filename):
# Need to add as new line
if len(emsgs) > 0:
emsgs += '\n' + TextEntry('09-001-00026', args=eargs)
else:
emsgs += TextEntry('09-001-00026', args=eargs)
continue
# check if not file (or link to file)
if not os.path.isfile(filename) and not os.path.islink(filename):
# Need to add as new line
if len(emsgs) > 0:
emsgs += '\n' + TextEntry('09-001-00025', args=eargs)
else:
emsgs += TextEntry('09-001-00025', args=eargs)
# if we have emsgs then we need to get the errors
if len(emsgs) > 0:
return False, None, emsgs
else:
return True, files, []
def _check_file_exclusivity(recipe, argname, drs_file, logic, outtypes,
alltypelist=None):
# get drs parameters
params = recipe.drs_params
# deal with no alltypelist
if alltypelist is None:
alltypelist = list(outtypes)
else:
alltypelist = list(alltypelist) + list(outtypes)
# if we have no files yet we don't need to check exclusivity
if len(alltypelist) == 0:
dargs = [argname, drs_file.name]
WLOG(params, 'debug', TextEntry('90-001-00013', args=dargs),
wrap=False)
return True, None
# if argument logic is set to "exclusive" we need to check that the
# drs_file.name is the same for this as the last file in outtypes
if logic == 'exclusive':
# match by name of drs_file
cond = drs_file.name == alltypelist[-1].name
# if condition not met return False and error
if not cond:
eargs = [argname, drs_file.name, alltypelist[-1].name]
emsg = TextEntry('09-001-00008', args=eargs)
return False, emsg
# if condition is met return True and empty error
else:
dargs = [argname, drs_file.name]
WLOG(params, 'debug', TextEntry('90-001-00014', args=dargs),
wrap=False)
return True, None
# if logic is 'inclusive' we just need to return True
elif logic == 'inclusive':
WLOG(params, 'debug', TextEntry('90-001-00015', args=[argname]),
wrap=False)
return True, None
# else logic is wrong - raise error
else:
eargs = [argname, recipe.name]
WLOG(params, 'error', TextEntry('00-006-00004', args=eargs),
wrap=False)
# =============================================================================
# Define run making functions
# =============================================================================
def find_run_files(params, recipe, table, args, filters=None,
allowedfibers=None, **kwargs):
# set function name
func_name = display_func(params, 'find_run_files', __NAME__)
# storage for valid files for each argument
filedict = OrderedDict()
# get constants from params
absfile_col = pcheck(params, 'REPROCESS_ABSFILECOL', 'absfile_col',
kwargs, func_name)
check_required = kwargs.get('check_required', False)
# get raw filenames from table
files = table[absfile_col]
# debug log the number of files found
dargs = [func_name, len(files)]
WLOG(params, 'debug', TextEntry('90-503-00011', args=dargs))
# loop around arguments
for argname in args:
# get arg instance
arg = args[argname]
# if check required see if parameter is required
if check_required:
if not arg.required and not arg.reprocess:
continue
# see if we are over writing argument
if argname in recipe.extras:
filedict[argname] = recipe.extras[argname]
continue
# make sure we are only dealing with dtype=files
if arg.dtype not in ['file', 'files']:
# deal with directory (special argument) - if we have a
# master night use the master night as the directory name
if arg.dtype == 'directory' and recipe.master:
filedict[argname] = params['MASTER_NIGHT']
# else set the file dict value to the default value
# TODO: Need a better option for this!!
# TODO: i.e. when we need values to be set from the header
else:
filedict[argname] = arg.default
continue
# add sub-dictionary for each drs file
filedict[argname] = OrderedDict()
# debug log: the argument being scanned
WLOG(params, 'debug', TextEntry('90-503-00012', args=[argname]))
# get drs file instances
drsfiles = arg.files
# if files are None continue
if drsfiles is None:
continue
# ------------------------------------------------------------------
# mask table by filters (if filters in table)
        # deal with no filters (treat None as "no filtering")
        if filters is None:
            filters = dict()
        filtermask = np.ones(len(table), dtype=bool)
        for tfilter in filters:
if tfilter in table.colnames:
# deal with filter values being list/str
if isinstance(filters[tfilter], str):
testvalues = [filters[tfilter]]
elif isinstance(filters[tfilter], list):
testvalues = filters[tfilter]
else:
continue
# create a test mask
testmask = np.zeros(len(table), dtype=bool)
# loop around test values with OR (could be any value)
for testvalue in testvalues:
# check if value is string
if isinstance(testvalue, str):
values = np.char.array(table[tfilter])
values = values.strip().upper()
testvalue = testvalue.strip().upper()
else:
values = np.array(table[tfilter])
# criteria 1: value == testvalue
vcond1 = values == testvalue
# criteria 2: value in [None, 'None', '']
vcond2 = np.in1d(values, [None, 'None', ''])
# check mask
testmask |= (vcond1 | vcond2)
# add filter to filter mask with AND (must have all filters)
filtermask &= testmask
# ------------------------------------------------------------------
# loop around drs files
for drsfile in drsfiles:
# copy table
ftable = Table(table[filtermask])
ffiles = np.array(files)[filtermask]
# debug log: the file being tested
dargs = [drsfile.name]
WLOG(params, 'debug', TextEntry('90-503-00013', args=dargs))
# define storage (if not already defined)
cond1 = drsfile.name not in filedict[argname]
if cond1 and (arg.filelogic == 'exclusive'):
filedict[argname][drsfile.name] = []
elif 'all' not in filedict[argname]:
filedict[argname]['all'] = []
# list of valid files
valid_infiles = []
valid_outfiles = []
valid_num = 0
# loop around files
+= self.copy_from_reg_to_reg(destination=dividend,
source=src)
# mov eax -> edx
src = ProcessorRegister.accumulator
dest = ProcessorRegister.data
value += self.copy_from_reg_to_reg(destination=dest,
source=src)
# shift edx by 31 -> contains the highest bits of the dividend,
# eax the lowest 31 bits
value += self.shift(ProcessorRegister.data,
ShiftMode.right_arithmetic,
amount=31)
value.append(0xf7) # idiv
mod = 0b11
rm = get_register_encoding(divider)
reg = 7 # F7 /7 -> 7 in the reg field
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
# the result is stored in the acc register, so copy it to the
# correct result register if needed
if destination != ProcessorRegister.accumulator:
register = ProcessorRegister.accumulator
value += self.copy_from_reg_to_reg(register, dividend)
return value
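# Worked example (illustrative, not part of the original module): with the
# standard x86 register encoding ecx == 1, the ModR/M byte built above is
# (0b11 << 6) + (7 << 3) + 1 == 0xF9, so dividing by ecx emits the two bytes
# F7 F9, i.e. `idiv ecx`.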
def mul(self, destination, source):
"""Multiply the value of the source by the destination.
destination = source * destination
Args:
source (ProcessorRegister): the source register
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code
"""
value = bytearray()
if is_single_scalar_reg(destination):
value.extend([0xF3, 0x0F, 0x59]) # mulss
mod = 0b11
reg = get_register_encoding(destination)
rm = get_register_encoding(source)
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
elif is_double_scalar_reg(destination):
value.extend([0xF2, 0x0F, 0x59]) # mulsd
mod = 0b11
reg = get_register_encoding(destination)
rm = get_register_encoding(source)
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
else:
value.extend([0x0F, 0xAF]) # imul
mod = 0b11
reg = get_register_encoding(destination)
rm = get_register_encoding(source)
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
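# Worked example (illustrative): on the integer path, multiplying destination
# edx (encoding 2) by source ecx (encoding 1) builds
# ModR/M = (0b11 << 6) + (2 << 3) + 1 == 0xD1, so the emitted bytes are
# 0F AF D1, i.e. `imul edx, ecx`.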
def shift(self, register, mode, amount):
"""Shift the register.
Args:
register (ProcessorRegister): the register to shift
mode (ShiftMode): the mode to shift
amount (int): the shift amount
Returns:
bytearray: the machine code
Raises:
NotImplementedError: if the mode is not yet implemented
"""
value = bytearray()
if mode == ShiftMode.right_arithmetic:
# SAR r/m32, imm8
value.append(0xC1)
mod = 0b11
rm = get_register_encoding(register)
reg = 7 # C1 /7 ib -> 7 in reg field
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
encoded_amount = struct.pack("b", amount)
value += encoded_amount
elif mode == ShiftMode.left_arithmetic:
# SAL r/m32, imm8
value.append(0xC1)
mod = 0b11
rm = get_register_encoding(register)
reg = 4 # C1 /4 ib -> 4 in reg field
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
encoded_amount = struct.pack("b", amount)
value += encoded_amount
else:
raise NotImplementedError
return value
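# Worked example (illustrative): the division routine above shifts edx
# (encoding 2) right arithmetically by 31, which is C1 /7 ib with
# ModR/M = (0b11 << 6) + (7 << 3) + 2 == 0xFA, so the emitted bytes are
# C1 FA 1F, i.e. `sar edx, 31`.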
def cmp(self, register_1, register_2):
"""Compare the 2 registers.
Args:
register_1 (ProcessorRegister): the first register
register_2 (ProcessorRegister): the second register
Returns:
bytearray: the machine code
"""
value = bytearray()
# CMP r/m32, r32
value.append(0x39)
mod = 0b11
rm = get_register_encoding(register_1)
reg = get_register_encoding(register_2)
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
def cmp_against_const(self, register, const):
"""Compare the 2 registers.
Args:
register (ProcessorRegister): the register
const (int): the const value
Returns:
bytearray: the machine code
"""
value = bytearray()
# CMP r/m32, imm32
value.append(0x81)
mod = 0b11
rm = get_register_encoding(register)
reg = 7
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
encoded_const = struct.pack("i", const)
value += encoded_const
return value
def je(self, jump_distance):
"""Jump if the equals flag is set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# JE rel8
value.append(0x74)
encoded_amount = struct.pack("b", jump_distance)
value += encoded_amount
return value
def jne(self, jump_distance):
"""Jump if the equals flag is not set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# 0F 85 cd JNE rel32
value.extend([0x0F, 0x85])
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
def jge(self, jump_distance):
"""Jump if the greater or equal flags are set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# 0F 8D cd JGE rel32
value.extend([0x0F, 0x8D])
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
def jle(self, jump_distance):
"""Jump if the less or equal flags are set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# 0F 8E cw JLE rel32
value.extend([0x0F, 0x8E])
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
def jg(self, jump_distance):
"""Jump if the greater flags are set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# 0F 8F cd JG rel32
value.extend([0x0F, 0x8F])
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
def jl(self, jump_distance):
"""Jump if the less flags are set.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code
"""
value = bytearray()
# 0F 8C cd JL rel32
value.extend([0x0F, 0x8C])
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
def jmp(self, jump_distance):
"""Jump.
Args:
jump_distance (int): the distance to jump in bytes
Returns:
bytearray: the machine code #noqa I202
"""
value = bytearray()
# JMP rel32
value.append(0xe9)
encoded_amount = struct.pack("i", jump_distance)
value += encoded_amount
return value
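# Worked example (illustrative): struct.pack("i", ...) writes the rel32
# displacement little-endian, so jmp(0x10) emits E9 10 00 00 00 and a backward
# jump of -5 emits E9 FB FF FF FF; as with all x86 relative jumps, the
# distance is measured from the end of the jump instruction itself.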
def bitwise_and(self, source, destination):
"""Bitwise and the value of the source to the destination.
Args:
source (ProcessorRegister): the source register
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code #noqa I202
"""
value = bytearray()
value.append(0x21) # AND r/m32, r32
rm = get_register_encoding(destination)
reg = get_register_encoding(source)
# ModR/M byte with MOD=0b11 (register-direct); RM encodes the destination
# and REG encodes the source
mod = 0b11
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
def bitwise_or(self, source, destination):
"""Bitwise or the value of the source to the destination.
Args:
source (ProcessorRegister): the source register
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code #noqa I202
"""
value = bytearray()
value.append(0x09) # OR r/m32, r32
rm = get_register_encoding(destination)
reg = get_register_encoding(source)
# ModR/M byte with MOD=0b11 (register-direct); RM encodes the destination
# and REG encodes the source
mod = 0b11
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
def bitwise_xor(self, source, destination):
"""Bitwise xor the value of the source to the destination.
Args:
source (ProcessorRegister): the source register
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code #noqa I202
"""
value = bytearray()
value.append(0x31) # XOR r/m32, r32
rm = get_register_encoding(destination)
reg = get_register_encoding(source)
# ModR/M byte with MOD=0b11 (register-direct); RM encodes the destination
# and REG encodes the source
mod = 0b11
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
def bitwise_not(self, destination):
"""Bitwise xor the value of the source to the destination.
Args:
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code #noqa I202
"""
value = bytearray()
value.append(0xf7) # F7 /2 NOT r/m32
rm = get_register_encoding(destination)
reg = 2 # F7 /2 NOT r/m32
# ModR/M byte with MOD=0b11 (register-direct); RM encodes the destination
# and REG carries the /2 opcode extension
mod = 0b11
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
return value
def logical_and(self, source, destination):
"""Logical and the value of the source to the destination.
Args:
source (ProcessorRegister): the source register
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code
"""
value = bytearray()
value.append(0x85) # TEST r/m32, r32
rm = get_register_encoding(destination)
reg = get_register_encoding(source)
# ModR/M byte with MOD=0b11 (register-direct); RM encodes the destination
# and REG encodes the source
mod = 0b11
modr_byte = (mod << 6) + (reg << 3) + (rm << 0)
value.append(modr_byte)
# zero the destination register; MOV does not modify EFLAGS, so the zero
# flag produced by the TEST above is preserved for the SETNZ below
value += self.copy_value_to_reg(0, destination)
# the zero flag will be set if the and was zero
value += self.setnz(destination)
value += self.movzx(destination, destination)
return value
def setnz(self, destination):
"""Set destination if the zero flag is not set.
Args:
destination (ProcessorRegister): the destination register
Returns:
bytearray: the machine code
A value indicating whether an existing NIC is allowed to be reused
during failover subject to availability.
:type reuse_existing_nic: bool
:param tfo_recovery_nic_name: The name of the NIC to be used when creating target NICs in TFO.
:type tfo_recovery_nic_name: str
:param tfo_recovery_nic_resource_group_name: The resource group of the NIC to be used when
creating target NICs in TFO.
:type tfo_recovery_nic_resource_group_name: str
:param tfo_reuse_existing_nic: A value indicating whether an existing NIC is allowed to be
reused during test failover subject to availability.
:type tfo_reuse_existing_nic: bool
:param target_nic_name: Target NIC name.
:type target_nic_name: str
"""
_attribute_map = {
'nic_id': {'key': 'nicId', 'type': 'str'},
'replica_nic_id': {'key': 'replicaNicId', 'type': 'str'},
'source_nic_arm_id': {'key': 'sourceNicArmId', 'type': 'str'},
'v_m_network_name': {'key': 'vMNetworkName', 'type': 'str'},
'recovery_vm_network_id': {'key': 'recoveryVMNetworkId', 'type': 'str'},
'ip_configs': {'key': 'ipConfigs', 'type': '[IPConfigDetails]'},
'selection_type': {'key': 'selectionType', 'type': 'str'},
'recovery_network_security_group_id': {'key': 'recoveryNetworkSecurityGroupId', 'type': 'str'},
'enable_accelerated_networking_on_recovery': {'key': 'enableAcceleratedNetworkingOnRecovery', 'type': 'bool'},
'tfo_vm_network_id': {'key': 'tfoVMNetworkId', 'type': 'str'},
'tfo_network_security_group_id': {'key': 'tfoNetworkSecurityGroupId', 'type': 'str'},
'enable_accelerated_networking_on_tfo': {'key': 'enableAcceleratedNetworkingOnTfo', 'type': 'bool'},
'recovery_nic_name': {'key': 'recoveryNicName', 'type': 'str'},
'recovery_nic_resource_group_name': {'key': 'recoveryNicResourceGroupName', 'type': 'str'},
'reuse_existing_nic': {'key': 'reuseExistingNic', 'type': 'bool'},
'tfo_recovery_nic_name': {'key': 'tfoRecoveryNicName', 'type': 'str'},
'tfo_recovery_nic_resource_group_name': {'key': 'tfoRecoveryNicResourceGroupName', 'type': 'str'},
'tfo_reuse_existing_nic': {'key': 'tfoReuseExistingNic', 'type': 'bool'},
'target_nic_name': {'key': 'targetNicName', 'type': 'str'},
}
def __init__(
self,
*,
nic_id: Optional[str] = None,
replica_nic_id: Optional[str] = None,
source_nic_arm_id: Optional[str] = None,
v_m_network_name: Optional[str] = None,
recovery_vm_network_id: Optional[str] = None,
ip_configs: Optional[List["IPConfigDetails"]] = None,
selection_type: Optional[str] = None,
recovery_network_security_group_id: Optional[str] = None,
enable_accelerated_networking_on_recovery: Optional[bool] = None,
tfo_vm_network_id: Optional[str] = None,
tfo_network_security_group_id: Optional[str] = None,
enable_accelerated_networking_on_tfo: Optional[bool] = None,
recovery_nic_name: Optional[str] = None,
recovery_nic_resource_group_name: Optional[str] = None,
reuse_existing_nic: Optional[bool] = False,
tfo_recovery_nic_name: Optional[str] = None,
tfo_recovery_nic_resource_group_name: Optional[str] = None,
tfo_reuse_existing_nic: Optional[bool] = False,
target_nic_name: Optional[str] = None,
**kwargs
):
super(VMNicDetails, self).__init__(**kwargs)
self.nic_id = nic_id
self.replica_nic_id = replica_nic_id
self.source_nic_arm_id = source_nic_arm_id
self.v_m_network_name = v_m_network_name
self.recovery_vm_network_id = recovery_vm_network_id
self.ip_configs = ip_configs
self.selection_type = selection_type
self.recovery_network_security_group_id = recovery_network_security_group_id
self.enable_accelerated_networking_on_recovery = enable_accelerated_networking_on_recovery
self.tfo_vm_network_id = tfo_vm_network_id
self.tfo_network_security_group_id = tfo_network_security_group_id
self.enable_accelerated_networking_on_tfo = enable_accelerated_networking_on_tfo
self.recovery_nic_name = recovery_nic_name
self.recovery_nic_resource_group_name = recovery_nic_resource_group_name
self.reuse_existing_nic = reuse_existing_nic
self.tfo_recovery_nic_name = tfo_recovery_nic_name
self.tfo_recovery_nic_resource_group_name = tfo_recovery_nic_resource_group_name
self.tfo_reuse_existing_nic = tfo_reuse_existing_nic
self.target_nic_name = target_nic_name
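# Illustrative usage sketch (not part of the generated SDK; the field values
# below are placeholders, not real ARM identifiers):
#
#     nic = VMNicDetails(
#         nic_id='nic-0',
#         selection_type='SelectedByUser',
#         reuse_existing_nic=True,
#         target_nic_name='target-nic-0',
#     )
#
# Unset keyword arguments stay at their defaults from the signature above
# (None, or False for the two reuse flags).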
class VMNicInputDetails(msrest.serialization.Model):
"""Hyper V VM network input details.
:param nic_id: The nic Id.
:type nic_id: str
:param ip_configs: The IP configurations to be used by NIC during test failover and failover.
:type ip_configs: list[~azure.mgmt.recoveryservicessiterecovery.models.IPConfigInputDetails]
:param selection_type: Selection type for failover.
:type selection_type: str
:param recovery_network_security_group_id: The id of the NSG associated with the NIC.
:type recovery_network_security_group_id: str
:param enable_accelerated_networking_on_recovery: Whether the NIC has accelerated networking
enabled.
:type enable_accelerated_networking_on_recovery: bool
:param tfo_network_security_group_id: The NSG to be used by NIC during test failover.
:type tfo_network_security_group_id: str
:param enable_accelerated_networking_on_tfo: Whether the test NIC has accelerated networking
enabled.
:type enable_accelerated_networking_on_tfo: bool
:param recovery_nic_name: The name of the NIC to be used when creating target NICs.
:type recovery_nic_name: str
:param recovery_nic_resource_group_name: The resource group of the NIC to be used when creating
target NICs.
:type recovery_nic_resource_group_name: str
:param reuse_existing_nic: A value indicating whether an existing NIC is allowed to be reused
during failover subject to availability.
:type reuse_existing_nic: bool
:param tfo_nic_name: The name of the NIC to be used when creating target NICs in TFO.
:type tfo_nic_name: str
:param tfo_nic_resource_group_name: The resource group of the NIC to be used when creating
target NICs in TFO.
:type tfo_nic_resource_group_name: str
:param tfo_reuse_existing_nic: A value indicating whether an existing NIC is allowed to be
reused during test failover subject to availability.
:type tfo_reuse_existing_nic: bool
:param target_nic_name: Target NIC name.
:type target_nic_name: str
"""
_attribute_map = {
'nic_id': {'key': 'nicId', 'type': 'str'},
'ip_configs': {'key': 'ipConfigs', 'type': '[IPConfigInputDetails]'},
'selection_type': {'key': 'selectionType', 'type': 'str'},
'recovery_network_security_group_id': {'key': 'recoveryNetworkSecurityGroupId', 'type': 'str'},
'enable_accelerated_networking_on_recovery': {'key': 'enableAcceleratedNetworkingOnRecovery', 'type': 'bool'},
'tfo_network_security_group_id': {'key': 'tfoNetworkSecurityGroupId', 'type': 'str'},
'enable_accelerated_networking_on_tfo': {'key': 'enableAcceleratedNetworkingOnTfo', 'type': 'bool'},
'recovery_nic_name': {'key': 'recoveryNicName', 'type': 'str'},
'recovery_nic_resource_group_name': {'key': 'recoveryNicResourceGroupName', 'type': 'str'},
'reuse_existing_nic': {'key': 'reuseExistingNic', 'type': 'bool'},
'tfo_nic_name': {'key': 'tfoNicName', 'type': 'str'},
'tfo_nic_resource_group_name': {'key': 'tfoNicResourceGroupName', 'type': 'str'},
'tfo_reuse_existing_nic': {'key': 'tfoReuseExistingNic', 'type': 'bool'},
'target_nic_name': {'key': 'targetNicName', 'type': 'str'},
}
def __init__(
self,
*,
nic_id: Optional[str] = None,
ip_configs: Optional[List["IPConfigInputDetails"]] = None,
selection_type: Optional[str] = None,
recovery_network_security_group_id: Optional[str] = None,
enable_accelerated_networking_on_recovery: Optional[bool] = None,
tfo_network_security_group_id: Optional[str] = None,
enable_accelerated_networking_on_tfo: Optional[bool] = None,
recovery_nic_name: Optional[str] = None,
recovery_nic_resource_group_name: Optional[str] = None,
reuse_existing_nic: Optional[bool] = None,
tfo_nic_name: Optional[str] = None,
tfo_nic_resource_group_name: Optional[str] = None,
tfo_reuse_existing_nic: Optional[bool] = None,
target_nic_name: Optional[str] = None,
**kwargs
):
super(VMNicInputDetails, self).__init__(**kwargs)
self.nic_id = nic_id
self.ip_configs = ip_configs
self.selection_type = selection_type
self.recovery_network_security_group_id = recovery_network_security_group_id
self.enable_accelerated_networking_on_recovery = enable_accelerated_networking_on_recovery
self.tfo_network_security_group_id = tfo_network_security_group_id
self.enable_accelerated_networking_on_tfo = enable_accelerated_networking_on_tfo
self.recovery_nic_name = recovery_nic_name
self.recovery_nic_resource_group_name = recovery_nic_resource_group_name
self.reuse_existing_nic = reuse_existing_nic
self.tfo_nic_name = tfo_nic_name
self.tfo_nic_resource_group_name = tfo_nic_resource_group_name
self.tfo_reuse_existing_nic = tfo_reuse_existing_nic
self.target_nic_name = target_nic_name
class VmNicUpdatesTaskDetails(TaskTypeDetails):
"""This class represents the vm NicUpdates task details.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The type of task details.Constant filled by server.
:type instance_type: str
:param vm_id: Virtual machine Id.
:type vm_id: str
:param nic_id: Nic Id.
:type nic_id: str
:param name: Name of the Nic.
:type name: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'vm_id': {'key': 'vmId', 'type': 'str'},
'nic_id': {'key': 'nicId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
vm_id: Optional[str] = None,
nic_id: Optional[str] = None,
name: Optional[str] = None,
**kwargs
):
super(VmNicUpdatesTaskDetails, self).__init__(**kwargs)
self.instance_type = 'VmNicUpdatesTaskDetails' # type: str
self.vm_id = vm_id
self.nic_id = nic_id
self.name = name
class VMwareCbtContainerCreationInput(ReplicationProviderSpecificContainerCreationInput):
"""VMwareCbt container creation input.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VMwareCbtContainerCreationInput, self).__init__(**kwargs)
self.instance_type = 'VMwareCbt' # type: str
class VMwareCbtContainerMappingInput(ReplicationProviderSpecificContainerMappingInput):
"""VMwareCbt container mapping input.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
:param key_vault_id: Required. The target key vault ARM Id.
:type key_vault_id: str
:param key_vault_uri: Required. The target key vault URL.
:type key_vault_uri: str
:param storage_account_id: Required. The storage account ARM Id.
:type storage_account_id: str
:param storage_account_sas_secret_name: Required. The secret name of the storage account.
:type storage_account_sas_secret_name: str
:param service_bus_connection_string_secret_name: Required. The secret name of the service bus
connection string.
:type service_bus_connection_string_secret_name: str
:param target_location: Required. The target location.
:type target_location: str
"""
_validation = {
'instance_type': {'required': True},
'key_vault_id': {'required': True},
'key_vault_uri': {'required': True},
'storage_account_id': {'required': True},
'storage_account_sas_secret_name': {'required': True},
'service_bus_connection_string_secret_name': {'required': True},
'target_location': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'key_vault_id': {'key': 'keyVaultId', 'type': 'str'},
'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'storage_account_sas_secret_name': {'key': 'storageAccountSasSecretName', 'type': 'str'},
'service_bus_connection_string_secret_name': {'key': 'serviceBusConnectionStringSecretName', 'type': 'str'},
'target_location': {'key': 'targetLocation', 'type': 'str'},
}
def __init__(
self,
*,
key_vault_id: str,
key_vault_uri: str,
storage_account_id: str,
storage_account_sas_secret_name: str,
service_bus_connection_string_secret_name: str,
target_location: str,
**kwargs
):
super(VMwareCbtContainerMappingInput, self).__init__(**kwargs)
self.instance_type = 'VMwareCbt' # type: str
self.key_vault_id = key_vault_id
self.key_vault_uri = key_vault_uri
self.storage_account_id = storage_account_id
self.storage_account_sas_secret_name = storage_account_sas_secret_name
self.service_bus_connection_string_secret_name = service_bus_connection_string_secret_name
self.target_location = target_location
class VMwareCbtDiskInput(msrest.serialization.Model):
"""VMwareCbt disk input.
All required parameters must be populated in order to send to Azure.
:param disk_id: Required. The disk Id.
:type disk_id: str
:param disk_type: The disk type. Possible values include: "Standard_LRS", "Premium_LRS",
"StandardSSD_LRS".
:type disk_type: str or ~azure.mgmt.recoveryservicessiterecovery.models.DiskAccountType
:param is_os_disk: Required. A value indicating whether the disk is the OS disk.
:type is_os_disk: str
:param log_storage_account_id: Required. The log storage account ARM Id.
:type log_storage_account_id: str
:param log_storage_account_sas_secret_name: Required. The key vault secret name of the log
storage account.
:type log_storage_account_sas_secret_name: str
:param disk_encryption_set_id: The DiskEncryptionSet ARM Id.
:type disk_encryption_set_id: str
"""
_validation = {
'disk_id': {'required': True},
'is_os_disk': {'required': True},
'log_storage_account_id': {'required': True},
'log_storage_account_sas_secret_name': {'required': True},
}
_attribute_map = {
'disk_id': {'key': 'diskId', 'type': 'str'},
'disk_type': {'key': 'diskType', 'type': 'str'},
'is_os_disk': {'key': 'isOSDisk', 'type': 'str'},
'log_storage_account_id': {'key': 'logStorageAccountId', 'type': 'str'},
'log_storage_account_sas_secret_name': {'key': 'logStorageAccountSasSecretName', 'type': 'str'},
'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'},
}
def __init__(
self,
*,
disk_id: str,
# File: methtuple/funcs.py
from __future__ import print_function
from .mtuple import *
import re
import csv
import operator
import sys
import itertools
#### Function definitions ####
def make_ignores_list(ic):
"""Make a list from a string of read positions that are to be ignored. Input should be 1-based co-ordinates and these are converted to 0-based co-ordinates.
Args:
ic: A string of read positions to ignore. Multiple values should be comma-delimited, ranges can be specified by use of the hyphen and all positions should use 1-based co-ordinates. For example:
'1-5, 80, 98-100'
corresponds to ignoring read-positions 1, 2, 3, 4, 5, 80, 98, 99, 100 and so returns [0, 1, 2, 3, 4, 79, 97, 98, 99].
Returns:
A Python list of the 0-based positions to be ignored.
"""
if ic is None:
val = []
else:
val = []
y = [x.strip() for x in ic.split(',')]
for i in y:
z = [x.strip() for x in i.split('-')]
if len(z) == 2:
val = val + list(range(int(z[0]), int(z[1]) + 1))
elif len(z) == 1:
val = val + [int(z[0])]
else:
exit_msg = ''.join(['ERROR: -ir1p and -ir2p must be comma-delimited. Ranges can be specified by use of the hyphen and all positions should use 1-based co-ordinates, e.g. \'1-5, 80, 98-100\'.'])
sys.exit(exit_msg)
if not all(isinstance(i, int) for i in val):
exit_msg = ''.join(['ERROR: -ir1p and -ir2p must be comma-delimited. Ranges can be specified by use of the hyphen and all positions should use 1-based co-ordinates, e.g. \'1-5, 80, 98-100\'.'])
sys.exit(exit_msg)
# Convert from 1-based to 0-based co-ordinates
val = [x - 1 for x in val]
return val
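# Illustrative usage (mirrors the example in the docstring above):
#
#     >>> make_ignores_list('1-5, 80, 98-100')
#     [0, 1, 2, 3, 4, 79, 97, 98, 99]
#
# i.e. the 1-based positions are expanded and then shifted to 0-based indices.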
def ignore_read_pos(read, methylation_index, ignore_read_pos_list):
"""Ignore methylation loci in a read that appear in the ignore_read_pos_list. A methylation locus may be one of CpG, CHH, CHG or CNN.
Args:
read: A pysam.AlignedSegment instance.
methylation_index: A list of zero-based indices. Each index corresponds to the leftmost aligned position of a methylation locus in a read. For example:
[0, 5]
corresponds to a read with a methylation locus at the first and sixth positions of the read.
ignore_read_pos_list: The list of read positions to be ignored.
Returns:
An updated version of methylation_index. Will report a warning if the FLAG does not encode whether the read is part of a paired-end or which mate of the paired-end read it is. Will report an error and call sys.exit() if the XR-tag or XG-tag is incompatible or missing.
"""
# NOTE: Assumes that paired-end reads have FR orientation, which is always true for Bismark but might not be for other aligners
strand = get_strand(read)
# Single-end reads
if not read.is_paired:
if strand == '+':
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
elif strand == '-':
ignore_read_pos_list = [read.query_length - ic - 1 for ic in ignore_read_pos_list]
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
# Paired-end reads: read_1
elif read.is_paired and read.is_read1:
if strand == '+':
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
elif strand == '-':
ignore_read_pos_list = [read.query_length - ic - 1 for ic in ignore_read_pos_list]
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
# Paired-end reads: read_2
elif read.is_paired and read.is_read2:
if strand == '+':
ignore_read_pos_list = [read.query_length - ic - 1 for ic in ignore_read_pos_list]
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
if strand == '-':
mi_updated = [mi for mi in methylation_index if mi not in ignore_read_pos_list]
# Return updated methylation_index
return mi_updated
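# Illustrative sketch (assumed values): for a single-end read on the '-'
# strand with query_length == 100 and ignore_read_pos_list == [0, 1], the
# list is first mirrored to [99, 98] and those indices are then removed from
# methylation_index, so the ignored positions follow the sequenced read
# orientation rather than the reference orientation.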
def ignore_low_quality_bases(read, methylation_index, min_qual, phred_offset):
"""Ignore low quality bases of a read that contribute to a read's methylation_index.
Args:
read: A pysam.AlignedSegment instance.
methylation_index: A list of zero-based indices. Each index corresponds to the leftmost aligned position of a methylation locus in a read. For example:
[0, 5]
corresponds to a read with a methylation locus at the first and sixth positions of the read.
min_qual: The minimum base quality (integer). All bases with quality < min_qual are excluded from the returned methylation_index instance.
phred_offset: The Phred offset of the data (33 or 64).
Returns:
An updated version of methylation_index.
"""
if (min_qual < 0) or (round(min_qual) != min_qual):
raise ValueError("ignore_low_quality_bases: 'min_qual' must be a non-negative integer")
if phred_offset != 33 and phred_offset != 64:
raise ValueError("ignore_low_quality_bases: 'phred_offset' must be 33 or 64")
# As of pysam v0.8.1, pysam.AlignedSegment.query_qualities now returns "a python array of unsigned chars" and "no offset of 33 needs to be subtracted" (http://pysam.readthedocs.org/en/latest/api.html#api). Therefore phred64 encoded base qualities now need to have an offset 64 - 33 = 31 subtracted and phred33 base qualities do not need any offset subtracted (0 is subtracted, which is a no-op, to minimise code changes).
phred_offset = phred_offset - 33
ignore_these_bases = []
for i in methylation_index:
if (read.query_qualities[i] - phred_offset) < min_qual:
ignore_these_bases.append(i)
return [x for x in methylation_index if x not in ignore_these_bases]
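# Illustrative sketch (assumed values): with Phred+33 data, phred_offset - 33
# is 0, so a locus whose stored quality is 18 is dropped when min_qual == 20;
# with Phred+64 data the extra 31 is subtracted first, per the pysam note
# above.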
def fix_old_bismark(read):
"""Fix the QNAME and FLAG field of a paired-end read from a SAM/BAM file generated by Bismark version < 0.8.3
Args:
read: A pysam.AlignedSegment instance.
Returns:
An updated version of the read.
"""
# Strip '/1' or '/2' appended to the end of QNAMEs by Bismark version < 0.8.3. Assumes there are no forward slash characters in the QNAME field
read.query_name = read.query_name.split('/')[0]
# Fix FLAG value
if read.flag == 67:
read.flag = 99
elif read.flag == 115:
read.flag = 83
elif read.flag == 131:
read.flag = 147
elif read.flag == 179:
read.flag = 163
else:
exit_msg = ''.join(['ERROR: Unexpected FLAG (', str(read.flag), ') for read ', read.query_name, '. Sorry, --aligner Bismark_old is unable to deal with this FLAG. Please file an issue at www.github.com/PeteHaitch/methtuple describing the error.'])
sys.exit(exit_msg)
return read
def does_read_contain_complicated_cigar(read):
"""Check whether a read contains a complicated CIGAR string character, defined as anything other than a match (M; 0), insertion (I; 1), deletion (D; 2), soft-clip (S, 4) or hard-clip (H, 5).
Args:
read: A pysam.AlignedSegment instance.
Returns:
True if read contains an complicated CIGAR string character, False otherwise.
"""
val = any([x[0] not in [0, 1, 2, 4, 5] for x in read.cigartuples])
return val
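# Illustrative sketch (assumed reads): cigartuples of [(0, 40), (1, 2), (0, 58)]
# (40M2I58M) returns False, whereas [(0, 30), (3, 100), (0, 70)] (30M100N70M)
# returns True, because the BAM op code for N (reference skip) is 3, which is
# outside the allowed set [0, 1, 2, 4, 5].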
def extract_and_update_methylation_index_from_single_end_read(read, AlignmentFile, methylation_m_tuples, m, all_combinations, methylation_type, methylation_pattern, ignore_read_1_pos, min_qual, phred_offset, ob_strand_offset):
"""Extracts m-tuples of methylation loci from a single-end read and adds the comethylation m-tuple to the methylation_m_tuples object.
Args:
read: An AlignedSegment instance corresponding to a single-end read.
AlignmentFile: The AlignmentFile instance corresponding to the sample. Required in order to extract chromosome names from read.
methylation_m_tuples: An MTuple instance.
m: Is the "m" in "m-tuple", i.e. the size of the m-tuple. m must be an integer greater than or equal to 1. WARNING: No error or warning produced if this condition is violated.
all_combinations: A boolean indicating whether all combinations of m-tuples should be created or just "neighbouring" m-tuples.
methylation_type: A string of the methylation type, e.g. CG for CpG methylation. Must be a valid option for the MTuple class.
methylation_pattern: A regular expression of the methylation loci, e.g. '[Zz]' for CpG-methylation
ignore_read_1_pos: Ignore this list of read positions from each read.
min_qual: Ignore bases with quality-score less than this value.
phred_offset: The offset in the Phred scores. Phred33 corresponds to phred_offset = 33 and Phred64 corresponds to phred_offset = 64.
ob_strand_offset: How many bases a methylation loci on the OB-strand must be moved to the left in order to line up with the C on the OT-strand; e.g. ob_strand_offset = 1 for CpGs.
Returns:
methylation_m_tuples: An updated version of methylation_m_tuples.
n_methylation_loci: The number of methylation loci extracted from the read.
"""
# Identify methylation events in read, e.g. CpGs or CHHs. The methylation_pattern is specified by a command line argument (e.g. Z/z corresponds to CpG)
methylation_index = [midx.start() for midx in re.finditer(methylation_pattern, read.get_tag('XM'))]
# Ignore any read positions specified in ignore_read_1_pos
methylation_index = ignore_read_pos(read, methylation_index, ignore_read_1_pos)
# Ignore any positions with a base quality less than min_qual
methylation_index = ignore_low_quality_bases(read, methylation_index, min_qual, phred_offset)
n_methylation_loci = len(methylation_index)
strand = get_strand(read)
# Call methylation m-tuples if there are sufficient methylation loci in the read
# File: yinyang/src/parsing/SMTLIBv2Visitor.py
# MIT License
#
# Copyright (c) [2020 - 2021] The yinyang authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Generated from SMTLIBv2.g4 by ANTLR 4.8
from antlr4 import *
from yinyang.src.parsing.Ast import *
from yinyang.src.parsing.SMTLIBv2Parser import SMTLIBv2Parser
# This class defines a complete generic visitor for a parse tree produced by SMTLIBv2Parser.
class SMTLIBv2Visitor(ParseTreeVisitor):
# Visit a parse tree produced by SMTLIBv2Parser#response.
def visitResponse(self, ctx: SMTLIBv2Parser.ResponseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#generalReservedWord.
def visitGeneralReservedWord(self, ctx: SMTLIBv2Parser.GeneralReservedWordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#simpleSymbol.
def visitSimpleSymbol(self, ctx: SMTLIBv2Parser.SimpleSymbolContext):
return ctx.getText()
# Visit a parse tree produced by SMTLIBv2Parser#quotedSymbol.
def visitQuotedSymbol(self, ctx: SMTLIBv2Parser.QuotedSymbolContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#predefSymbol.
def visitPredefSymbol(self, ctx: SMTLIBv2Parser.PredefSymbolContext):
return ctx.getText()
# Visit a parse tree produced by SMTLIBv2Parser#predefKeyword.
def visitPredefKeyword(self, ctx: SMTLIBv2Parser.PredefKeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#symbol.
def visitSymbol(self, ctx: SMTLIBv2Parser.SymbolContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#numeral.
def visitNumeral(self, ctx: SMTLIBv2Parser.NumeralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#decimal.
def visitDecimal(self, ctx: SMTLIBv2Parser.DecimalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#hexadecimal.
def visitHexadecimal(self, ctx: SMTLIBv2Parser.HexadecimalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#binary.
def visitBinary(self, ctx: SMTLIBv2Parser.BinaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#string.
def visitString(self, ctx: SMTLIBv2Parser.StringContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#keyword.
def visitKeyword(self, ctx: SMTLIBv2Parser.KeywordContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#spec_constant.
def visitSpec_constant(self, ctx: SMTLIBv2Parser.Spec_constantContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#s_expr.
def visitS_expr(self, ctx: SMTLIBv2Parser.S_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#index.
def visitIndex(self, ctx: SMTLIBv2Parser.IndexContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#identifier.
def visitIdentifier(self, ctx: SMTLIBv2Parser.IdentifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#attribute_value.
def visitAttribute_value(self, ctx: SMTLIBv2Parser.Attribute_valueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#attribute.
def visitAttribute(self, ctx: SMTLIBv2Parser.AttributeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#qual_identifier.
def visitQual_identifier(self, ctx: SMTLIBv2Parser.Qual_identifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#var_binding.
def visitVar_binding(self, ctx: SMTLIBv2Parser.Var_bindingContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#sorted_var.
def visitSorted_var(self, ctx: SMTLIBv2Parser.Sorted_varContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#pattern.
def visitPattern(self, ctx: SMTLIBv2Parser.PatternContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#match_case.
def visitMatch_case(self, ctx: SMTLIBv2Parser.Match_caseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#sort_symbol_decl.
def visitSort_symbol_decl(self, ctx: SMTLIBv2Parser.Sort_symbol_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#meta_spec_constant.
def visitMeta_spec_constant(self, ctx: SMTLIBv2Parser.Meta_spec_constantContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#fun_symbol_decl.
def visitFun_symbol_decl(self, ctx: SMTLIBv2Parser.Fun_symbol_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#par_fun_symbol_decl.
def visitPar_fun_symbol_decl(self, ctx: SMTLIBv2Parser.Par_fun_symbol_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#theory_attribute.
def visitTheory_attribute(self, ctx: SMTLIBv2Parser.Theory_attributeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#theory_decl.
def visitTheory_decl(self, ctx: SMTLIBv2Parser.Theory_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#logic_attribue.
def visitLogic_attribue(self, ctx: SMTLIBv2Parser.Logic_attribueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#logic.
def visitLogic(self, ctx: SMTLIBv2Parser.LogicContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#sort_dec.
def visitSort_dec(self, ctx: SMTLIBv2Parser.Sort_decContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#selector_dec.
def visitSelector_dec(self, ctx: SMTLIBv2Parser.Selector_decContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#constructor_dec.
def visitConstructor_dec(self, ctx: SMTLIBv2Parser.Constructor_decContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#datatype_dec.
def visitDatatype_dec(self, ctx: SMTLIBv2Parser.Datatype_decContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#function_dec.
def visitFunction_dec(self, ctx: SMTLIBv2Parser.Function_decContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#function_def.
def visitFunction_def(self, ctx: SMTLIBv2Parser.Function_defContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#prop_literal.
def visitProp_literal(self, ctx: SMTLIBv2Parser.Prop_literalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_assert.
def visitCmd_assert(self, ctx: SMTLIBv2Parser.Cmd_assertContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_checkSat.
def visitCmd_checkSat(self, ctx: SMTLIBv2Parser.Cmd_checkSatContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_checkSatAssuming.
def visitCmd_checkSatAssuming(
self, ctx: SMTLIBv2Parser.Cmd_checkSatAssumingContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_declareConst.
def visitCmd_declareConst(self, ctx: SMTLIBv2Parser.Cmd_declareConstContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_declareDatatype.
def visitCmd_declareDatatype(self, ctx: SMTLIBv2Parser.Cmd_declareDatatypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_declareDatatypes.
def visitCmd_declareDatatypes(
self, ctx: SMTLIBv2Parser.Cmd_declareDatatypesContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_declareFun.
def visitCmd_declareFun(self, ctx: SMTLIBv2Parser.Cmd_declareFunContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_declareSort.
def visitCmd_declareSort(self, ctx: SMTLIBv2Parser.Cmd_declareSortContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_defineFun.
def visitCmd_defineFun(self, ctx: SMTLIBv2Parser.Cmd_defineFunContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_defineFunRec.
def visitCmd_defineFunRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunRecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_defineFunsRec.
def visitCmd_defineFunsRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunsRecContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_defineSort.
def visitCmd_defineSort(self, ctx: SMTLIBv2Parser.Cmd_defineSortContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_echo.
def visitCmd_echo(self, ctx: SMTLIBv2Parser.Cmd_echoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_exit.
def visitCmd_exit(self, ctx: SMTLIBv2Parser.Cmd_exitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getAssertions.
def visitCmd_getAssertions(self, ctx: SMTLIBv2Parser.Cmd_getAssertionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getAssignment.
def visitCmd_getAssignment(self, ctx: SMTLIBv2Parser.Cmd_getAssignmentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getInfo.
def visitCmd_getInfo(self, ctx: SMTLIBv2Parser.Cmd_getInfoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getModel.
def visitCmd_getModel(self, ctx: SMTLIBv2Parser.Cmd_getModelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getOption.
def visitCmd_getOption(self, ctx: SMTLIBv2Parser.Cmd_getOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getProof.
def visitCmd_getProof(self, ctx: SMTLIBv2Parser.Cmd_getProofContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getUnsatAssumptions.
def visitCmd_getUnsatAssumptions(
self, ctx: SMTLIBv2Parser.Cmd_getUnsatAssumptionsContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getUnsatCore.
def visitCmd_getUnsatCore(self, ctx: SMTLIBv2Parser.Cmd_getUnsatCoreContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_getValue.
def visitCmd_getValue(self, ctx: SMTLIBv2Parser.Cmd_getValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_pop.
def visitCmd_pop(self, ctx: SMTLIBv2Parser.Cmd_popContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_push.
def visitCmd_push(self, ctx: SMTLIBv2Parser.Cmd_pushContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_reset.
def visitCmd_reset(self, ctx: SMTLIBv2Parser.Cmd_resetContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_resetAssertions.
def visitCmd_resetAssertions(self, ctx: SMTLIBv2Parser.Cmd_resetAssertionsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_setInfo.
def visitCmd_setInfo(self, ctx: SMTLIBv2Parser.Cmd_setInfoContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_setLogic.
def visitCmd_setLogic(self, ctx: SMTLIBv2Parser.Cmd_setLogicContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#cmd_setOption.
def visitCmd_setOption(self, ctx: SMTLIBv2Parser.Cmd_setOptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#b_value.
def visitB_value(self, ctx: SMTLIBv2Parser.B_valueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#option.
def visitOption(self, ctx: SMTLIBv2Parser.OptionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#info_flag.
def visitInfo_flag(self, ctx: SMTLIBv2Parser.Info_flagContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#error_behaviour.
def visitError_behaviour(self, ctx: SMTLIBv2Parser.Error_behaviourContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#reason_unknown.
def visitReason_unknown(self, ctx: SMTLIBv2Parser.Reason_unknownContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#model_response.
def visitModel_response(self, ctx: SMTLIBv2Parser.Model_responseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by SMTLIBv2Parser#info_response.
def visitInfo_response(self, ctx: SMTLIBv2Parser.Info_responseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by
name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
def _cast(t: torch.Tensor):
if t.dtype == torch.float16:
return t.to(dtype=torch.float32)
else:
return t
checkpoint = {
"state_dict": {
k: _cast(v) for k, v in self.agent.state_dict().items()
},
# FIXME optim state, should I cast it?
"optim_state": self.agent.optim_state_dict(),
# "optim_state": {
# k: _cast(v) for k, v in self.agent.optimizer.state_dict().items()
# },
# "state_dict": self.agent.state_dict(),
# "optim_state": self.agent.optimizer.state_dict(),
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
os.makedirs(self.config.CHECKPOINT_FOLDER, exist_ok=True)
torch.save(
checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)
)
def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
r"""Load checkpoint of specified path as a dict.
Args:
checkpoint_path: path of target checkpoint
*args: additional positional args
**kwargs: additional keyword args
Returns:
dict containing checkpoint info
"""
return torch.load(checkpoint_path, *args, **kwargs)
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(
cls, info: Dict[str, Any]
) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(
v
).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
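# Illustrative sketch (assumed info dict): {"distance_to_goal": 2.5,
# "collisions": {"count": 3.0, "is_collision": 1.0}, "top_down_map": ...}
# flattens to {"distance_to_goal": 2.5, "collisions.count": 3.0}; the map and
# the nested "collisions.is_collision" entry are dropped via METRICS_BLACKLIST.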
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
def _get_policy_head_count(self):
reward_keys = self.config.RL.POLICIES
if reward_keys[0] == "none" and len(reward_keys) == 1:
return 1
if self.config.RL.REWARD_FUSION.STRATEGY == "SPLIT":
return 2
return 1
def _build_rewards(
self,
env_rewards,
metrics,
):
r"""
In order to support more complex reward operations, we treat rewards as normal metrics.
The env still returns rewards as per gym API, but env reward should be combined with measures as configured.
Typically, the env reward will just contain slack.
Args:
env_rewards: [b] env reward
metrics: dict of [b] (reward) measures.
Note these sizes don't have extra feature dims since rewards are expected to be scalars.
Return:
env_rewards: k x b, where k is the number of policy heads (typically 1)
"""
# extract the reward metrics
reward_keys = self.config.RL.POLICIES
if reward_keys[0] == "none" and len(reward_keys) == 1:
return env_rewards.unsqueeze(0)
strategy = self.config.RL.REWARD_FUSION.STRATEGY
if strategy == "SUM":
return (env_rewards + sum(metrics[p] for p in reward_keys)).unsqueeze(0)
reward_a = sum(metrics[p] for p in reward_keys[:-1]) + env_rewards
reward_b = metrics[reward_keys[-1]]
if self.config.RL.REWARD_FUSION.ENV_ON_ALL:
reward_b = reward_b + env_rewards
if self.config.RL.REWARD_FUSION.STRATEGY == "RAMP":
# Ramps from a to b
ramp_factor = min(1, max(0, (
self.count_steps - self.config.RL.REWARD_FUSION.RAMP.START
) / (
self.config.RL.REWARD_FUSION.RAMP.END - self.config.RL.REWARD_FUSION.RAMP.START
)))
return (reward_a * (1 - ramp_factor) + reward_b * ramp_factor).unsqueeze(0)
elif self.config.RL.REWARD_FUSION.STRATEGY == "SPLIT":
return torch.stack([reward_a, reward_b], dim=0)
raise NotImplementedError
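# Illustrative sketch of the RAMP strategy above (assumed config values): with
# RAMP.START == 1e6, RAMP.END == 2e6 and count_steps == 1.5e6, ramp_factor is
# 0.5 and the returned row is 0.5 * reward_a + 0.5 * reward_b; before START
# the blend is purely reward_a, after END purely reward_b.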
def _collect_rollout_step(
self, rollouts, current_episode_reward, current_episode_env_reward, running_episode_stats, prior_obs_state=None
):
pth_time = 0.0
env_time = 0.0
ppo_cfg = self.config.RL.PPO
curiosity_cfg = ppo_cfg.CURIOSITY
t_sample_action = time.time()
# sample actions
with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
behavioral_index = 0
if self._get_policy_head_count() > 1 and self.count_steps > self.config.RL.REWARD_FUSION.SPLIT.TRANSITION:
behavioral_index = 1
(
values,
actions,
actions_log_probs,
recurrent_hidden_states,
obs,
) = self.actor_critic.act(
step_observation,
rollouts.get_recurrent_states()[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
return_features=True,
behavioral_index=behavioral_index
)
pth_time += time.time() - t_sample_action
t_step_env = time.time()
outputs = self.envs.step([a[0].item() for a in actions])
observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]
env_time += time.time() - t_step_env
t_update_stats = time.time()
# Hardcoded
def map_to_full_metric(m):
if m == 'reached':
return ['coverage', 'reached']
elif m == 'visit_count':
return ['coverage', 'visit_count']
elif m == "mini_reached":
return ['coverage', 'mini_reached']
else:
return [m]
TRACKED_METRICS = [map_to_full_metric(m) for m in self.config.RL.PPO.ROLLOUT.METRICS]
tracked_metrics = batch_list(infos, device=self.device, whitelist=TRACKED_METRICS)
batch = batch_obs(observations, device=self.device)
if self.semantic_predictor is not None:
batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
if ppo_cfg.POLICY.EVAL_SEMANTICS_CKPT == "weights/rednet_semmap_mp3d_40.pth":
batch["semantic"] -= 1
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
rewards = torch.tensor(
rewards, dtype=torch.float, device=current_episode_reward.device
)
POLICY_METRICS = [map_to_full_metric(m) for m in self.config.RL.POLICIES if m != "none"] # Careful not to duplicate this
policy_metrics = batch_list(infos, device=rewards.device, whitelist=POLICY_METRICS)
rewards = self._build_rewards(rewards, policy_metrics)
rewards = rewards.unsqueeze(-1) # k x b -> k x b x 1
# reward [k x b x 1] * masks [b x 1] -> [k x b x 1]
masks = torch.tensor(
[[0.0] if done else [1.0] for done in dones],
dtype=torch.float,
device=current_episode_reward.device,
)
current_episode_env_reward += rewards
curiosity_obs = None
if curiosity_cfg.USE_CURIOSITY:
# ! Curiosity not supported for multi-rewards. Assuming bonus belongs to first dimension
curiosity_obs = obs[curiosity_cfg.VISION_KEY]
if prior_obs_state is not None:
with torch.no_grad():
# Pass in the state after seeing the prior observation (our input state)
prior_state = rollouts.get_recurrent_states()[rollouts.step] if curiosity_cfg.USE_BELIEF else None
fp_error = self.agent.get_curiosity_error(
prior_obs_state,
curiosity_obs,
rollouts.prev_actions[rollouts.step],
beliefs=prior_state
)
curiosity_reward = torch.log(fp_error + 1.0).unsqueeze(1).to(rewards.device) * curiosity_cfg.REWARD_SCALE
# If the episode has ended (mask is 0), prev and current obs are not in same scene, zero reward
curiosity_reward = curiosity_reward * masks # b x 1
rewards[:,0] = rewards[:, 0] + curiosity_reward
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward # only add reward at episode end?
running_episode_stats["env_reward"] += (1 - masks) * current_episode_env_reward
running_episode_stats["count"] += 1 - masks
for k, v in self._extract_scalars_from_infos(infos).items():
v = torch.tensor(
v, dtype=torch.float, device=current_episode_reward.device
).unsqueeze(1)
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
current_episode_env_reward *= masks
if self._static_encoder:
if self._fp16_mixed:
raise Exception("Not implemented")
with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
batch["visual_features"] = self._encoder(batch)
if self._get_policy_head_count() == 1: # Single-policy agents don't return the policy dimension.
values = values.unsqueeze(1)
actions_log_probs = actions_log_probs.unsqueeze(1)
rollouts.insert(
batch,
recurrent_hidden_states,
actions,
actions_log_probs, # b x k x 1
values, # b x k x 1
rewards, # k x b x 1
masks,
tracked_metrics
)
pth_time += time.time() - t_update_stats
return pth_time, env_time, self.envs.num_envs, curiosity_obs
def _update_agent(self, ppo_cfg, rollouts):
t_update_model = time.time()
with torch.no_grad(), torch.cuda.amp.autocast() if self._fp16_autocast else contextlib.suppress():
last_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
next_value = self.actor_critic.get_value(
last_observation,
rollouts.get_recurrent_states()[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
).detach()
behavioral_index = 0
if self._get_policy_head_count() > 1 and self.count_steps > self.config.RL.REWARD_FUSION.SPLIT.TRANSITION:
behavioral_index = 1
iw_clipped = ppo_cfg.SPLIT_IW_BOUNDS if hasattr(ppo_cfg, 'SPLIT_IW_BOUNDS') else \
[1.0 - ppo_cfg.clip_param, 1.0 + ppo_cfg.clip_param]
rollouts.compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau,
behavioral_index=behavioral_index,
importance_weight=self.config.RL.REWARD_FUSION.SPLIT.IMPORTANCE_WEIGHT,
weight_clip=iw_clipped,
)
(
value_loss,
action_loss,
dist_entropy,
aux_task_losses,
aux_dist_entropy,
aux_weights,
inv_curiosity,
fwd_curiosity,
) = self.agent.update(
rollouts,
ppo_cfg.gamma,
behavioral_index=behavioral_index
)
rollouts.after_update()
return (
time.time() - t_update_model,
value_loss,
action_loss,
dist_entropy,
aux_task_losses,
aux_dist_entropy,
aux_weights,
inv_curiosity,
fwd_curiosity,
)
def _make_deltas(self, window_episode_stats):
deltas = {
k: (
(v[-1] - v[0]).flatten(start_dim=-2).sum(dim=-1) # k x b x 1 OR b x 1 -> k or 1
if len(v) > 1
else v[0].flatten(start_dim=-2).sum(dim=-1)
)
for k, v in window_episode_stats.items()
}
# Get items, and flatten rewards to report multi-policy
flat_deltas = {}
for k, v in deltas.items():
if len(v.size()) > 0:
flat_deltas[k] = v[0].item()
for i in range(1, v.size(0)):
flat_deltas[f"{k}_{i}"] = v[i].item()
else:
flat_deltas[k] = v.item()
flat_deltas["count"] = max(flat_deltas["count"], 1.0)
return flat_deltas
def train(self, ckpt_path="", ckpt=-1, start_updates=0) -> None:
r"""Main method for training PPO.
Returns:
None
"""
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME)
)
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
ppo_cfg = self.config.RL.PPO
task_cfg = self.config.TASK_CONFIG.TASK
policy_encoders_map = get_vision_encoder_inputs(ppo_cfg)
"""
Initialize auxiliary tasks
"""
aux_cfg = self.config.RL.AUX_TASKS
init_aux_tasks, num_recurrent_memories, aux_task_strings, aux_encoder_insts = \
self._setup_auxiliary_tasks(aux_cfg, ppo_cfg, task_cfg,
observation_space=observation_space, policy_encoders=policy_encoders_map)
self._setup_actor_critic_agent(
ppo_cfg, task_cfg, aux_cfg,
init_aux_tasks,
aux_encoders=aux_encoder_insts,
policy_encoders=policy_encoders_map
)
rollouts = RolloutStorage(
ppo_cfg.num_steps,
self.envs.num_envs,
self.obs_space,
self.envs.action_spaces[0],
ppo_cfg.hidden_size,
num_recurrent_memories=num_recurrent_memories,
num_policy_heads=self._get_policy_head_count(),
metrics=ppo_cfg.ROLLOUT.METRICS
)
rollouts.to(self.device)
if self._fp16_mixed:
rollouts.to_fp16()
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
if self.semantic_predictor is not None:
batch["semantic"] = self.semantic_predictor(batch["rgb"], batch["depth"])
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
for sensor in rollouts.observations:
rollouts.observations[sensor][0].copy_(batch[sensor])
# batch and observations may contain shared PyTorch CUDA
# tensors. We must explicitly clear them here otherwise
# they will be kept in memory for the entire duration of training!
batch = None
observations = None
logger.info(
"agent number of parameters: {}".format(
sum(param.numel() for param in self.agent.actor_critic.parameters())
)
)
# -*- coding: utf-8 -*-
# Copyright 2009-2017 Yelp and Contributors
# Copyright 2018-2019 Yelp
# Copyright 2020 Affirm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for all runners that execute binaries/scripts
(that is, everything but inline mode).
"""
import logging
import os
import os.path
import pipes
import re
import sys
from mrjob.py2 import PY2
from platform import python_implementation
from subprocess import Popen
from subprocess import PIPE
try:
import pty
pty # quiet "redefinition of unused ..." warning from pyflakes
except ImportError:
pty = None
try:
import pyspark
pyspark # quiet "redefinition of unused ..." warning from pyflakes
except ImportError:
pyspark = None
import mrjob.step
from mrjob.compat import translate_jobconf
from mrjob.conf import combine_cmds
from mrjob.conf import combine_dicts
from mrjob.logs.log4j import _parse_hadoop_log4j_records
from mrjob.logs.spark import _parse_spark_log
from mrjob.logs.step import _eio_to_eof
from mrjob.py2 import string_types
from mrjob.runner import MRJobRunner
from mrjob.setup import parse_setup_cmd
from mrjob.util import cmd_line
from mrjob.util import shlex_split
from mrjob.util import unique
from mrjob.util import which
from mrjob.util import zip_dir
log = logging.getLogger(__name__)
# no need to escape arguments that only include these characters
_HADOOP_SAFE_ARG_RE = re.compile(r'^[\w\./=-]*$')
# used to handle manifest files
_MANIFEST_INPUT_FORMAT = 'org.apache.hadoop.mapred.lib.NLineInputFormat'
# map archive file extensions to the command used to unarchive them
_EXT_TO_UNARCHIVE_CMD = {
'.zip': 'unzip -o %(file)s -d %(dir)s',
'.tar': 'mkdir %(dir)s; tar xf %(file)s -C %(dir)s',
'.tar.gz': 'mkdir %(dir)s; tar xfz %(file)s -C %(dir)s',
'.tgz': 'mkdir %(dir)s; tar xfz %(file)s -C %(dir)s',
}
def _unarchive_cmd(path):
"""Look up the unarchive command to use with the given file extension,
or raise KeyError if there is no matching command."""
for ext, unarchive_cmd in sorted(_EXT_TO_UNARCHIVE_CMD.items()):
# use this so we can match e.g. mrjob-0.7.0.tar.gz
if path.endswith(ext):
return unarchive_cmd
raise KeyError('unknown archive type: %s' % path)
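# Illustrative usage (derived from _EXT_TO_UNARCHIVE_CMD above, not part of the original source):
#   _unarchive_cmd('mrjob-0.7.0.tar.gz')
#       -> 'mkdir %(dir)s; tar xfz %(file)s -C %(dir)s'
# callers fill the %(file)s / %(dir)s placeholders themselves; an unknown
# extension such as '.rar' raises KeyError.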
class MRJobBinRunner(MRJobRunner):
OPT_NAMES = MRJobRunner.OPT_NAMES | {
'python_bin',
'sh_bin',
'spark_args',
'spark_submit_bin',
'task_python_bin',
}
def __init__(self, **kwargs):
super(MRJobBinRunner, self).__init__(**kwargs)
# where a zip file of the mrjob library is stored locally
self._mrjob_zip_path = None
# we'll create the setup wrapper scripts later
self._setup_wrapper_script_path = None
self._manifest_setup_script_path = None
self._spark_python_wrapper_path = None
# self._setup is a list of shell commands with path dicts
# interleaved; see mrjob.setup.parse_setup_cmd() for details
self._setup = [parse_setup_cmd(cmd) for cmd in self._opts['setup']]
for cmd in self._setup:
for token in cmd:
if isinstance(token, dict):
# convert dir archives tokens to archives
if token['type'] == 'dir':
# feed the archive's path to self._working_dir_mgr
token['path'] = self._dir_archive_path(token['path'])
token['type'] = 'archive'
self._working_dir_mgr.add(**token)
# warning: no setup scripts on Spark when no working dir
if self._setup and self._has_pyspark_steps() and not(
self._spark_executors_have_own_wd()):
log.warning("setup commands aren't supported on Spark master %r" %
self._spark_master())
# --py-files on Spark doesn't allow '#' (see #1375)
if any('#' in path for path in self._opts['py_files']):
raise ValueError("py_files cannot contain '#'")
# Keep track of where the spark-submit binary is
self._spark_submit_bin = self._opts['spark_submit_bin']
# AFFIRM: parameterized cmdenv
# See commit <PASSWORD>
for key, value in self._opts['cmdenv'].items():
if '%s' in value:
job_key = self._job_key.replace('.', '_')
self._opts['cmdenv'][key] = value % job_key
@classmethod
def _default_opts(cls):
return combine_dicts(
super(MRJobBinRunner, cls)._default_opts(),
dict(
read_logs=True,
)
)
def _fix_opt(self, opt_key, opt_value, source):
"""Check sh_bin"""
opt_value = super(MRJobBinRunner, self)._fix_opt(
opt_key, opt_value, source)
# check that sh_bin doesn't have too many args
if opt_key == 'sh_bin':
# opt_value is usually a string, combiner makes it a list of args
sh_bin = combine_cmds(opt_value)
# empty sh_bin just means to use the default, see #1926
# make these hard requirements in v0.7.0?
if len(sh_bin) > 1 and not os.path.isabs(sh_bin[0]):
log.warning('sh_bin (from %s) should use an absolute path'
' if you want it to take arguments' % source)
elif len(sh_bin) > 2:
log.warning('sh_bin (from %s) should not take more than one'
' argument' % source)
return opt_value
### python binary ###
def _python_bin(self):
"""Python binary used for everything other than invoking the job.
For running job tasks (e.g. ``--mapper``, ``--spark``), we use
:py:meth:`_task_python_bin`, which can be set to a different value
by setting :mrjob-opt:`task_python_bin`.
Ways mrjob uses Python other than running tasks:
* file locking in setup wrapper scripts
* finding site-packages dir to bootstrap mrjob on clusters
* invoking ``cat.py`` in local mode
* the Python binary for Spark (``$PYSPARK_PYTHON``)
"""
# python_bin isn't an option for inline runners
return self._opts['python_bin'] or self._default_python_bin()
def _task_python_bin(self):
"""Python binary used to invoke job with ``--mapper``,
``--reducer``, ``--spark``, etc."""
return (self._opts['task_python_bin'] or
self._python_bin())
def _default_python_bin(self, local=False):
"""The default python command. If local is true, try to use
sys.executable. Otherwise use 'python2.7' or 'python3' as appropriate.
This returns a single-item list (because it's a command).
"""
is_pypy = (python_implementation() == 'PyPy')
if local and sys.executable:
return [sys.executable]
else:
if PY2:
return ['pypy'] if is_pypy else ['python2.7']
else:
return ['pypy3'] if is_pypy else ['python3']
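# Illustrative return values (read directly off the branches above):
#   local=True and sys.executable set -> [sys.executable]
#   Python 2, CPython -> ['python2.7']    Python 2, PyPy -> ['pypy']
#   Python 3, CPython -> ['python3']      Python 3, PyPy -> ['pypy3']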
### running MRJob scripts ###
def _script_args_for_step(self, step_num, mrc, input_manifest=False):
args = (self._task_python_bin() +
[self._working_dir_mgr.name('file', self._script_path)] +
self._args_for_task(step_num, mrc))
if input_manifest and mrc == 'mapper':
wrapper = self._manifest_setup_script_path
elif self._setup_wrapper_script_path:
wrapper = self._setup_wrapper_script_path
else:
return args
return (self._sh_bin() + [
self._working_dir_mgr.name('file', wrapper)] + args)
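# Rough shape of the command built above (file names are hypothetical placeholders):
#   without a setup wrapper: ['python3', 'mr_your_job.py', <task args...>]
#   with a setup wrapper:    ['/bin/sh', '-ex', 'setup-wrapper.sh',
#                             'python3', 'mr_your_job.py', <task args...>]
# the actual binaries come from task_python_bin / sh_bin and the names assigned
# by self._working_dir_mgr.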
def _substep_args(self, step_num, mrc):
step = self._get_step(step_num)
if step[mrc]['type'] == 'command':
cmd = step[mrc]['command']
# never wrap custom hadoop streaming commands in bash
if isinstance(cmd, string_types):
return shlex_split(cmd)
else:
return cmd
elif step[mrc]['type'] == 'script':
script_args = self._script_args_for_step(
step_num, mrc, input_manifest=step.get('input_manifest'))
if 'pre_filter' in step[mrc]:
return self._sh_wrap(
'%s | %s' % (step[mrc]['pre_filter'],
cmd_line(script_args)))
else:
return script_args
else:
raise ValueError("Invalid %s step %d: %r" % (
mrc, step_num, step[mrc]))
### hadoop streaming ###
def _render_substep(self, step_num, mrc):
step = self._get_step(step_num)
if mrc in step:
# cmd_line() does things that shell is fine with but
# Hadoop Streaming finds confusing.
return _hadoop_cmd_line(self._substep_args(step_num, mrc))
else:
if mrc == 'mapper':
return 'cat'
else:
return None
def _hadoop_args_for_step(self, step_num):
"""Build a list of extra arguments to the hadoop binary.
This handles *cmdenv*, *hadoop_extra_args*, *hadoop_input_format*,
*hadoop_output_format*, *jobconf*, and *partitioner*.
This doesn't handle input, output, mappers, reducers, or uploading
files.
"""
args = []
# -libjars, -D
args.extend(self._hadoop_generic_args_for_step(step_num))
# hadoop_extra_args (if defined; it's not for sim runners)
# this has to come after -D because it may include streaming-specific
# args (see #1332).
args.extend(self._opts.get('hadoop_extra_args', ()))
# partitioner
partitioner = self._partitioner or self._sort_values_partitioner()
if partitioner:
args.extend(['-partitioner', partitioner])
# cmdenv
for key, value in sorted(self._opts['cmdenv'].items()):
args.append('-cmdenv')
args.append('%s=%s' % (key, value))
# hadoop_input_format
if step_num == 0:
if self._uses_input_manifest():
args.extend(['-inputformat', _MANIFEST_INPUT_FORMAT])
elif self._hadoop_input_format:
args.extend(['-inputformat', self._hadoop_input_format])
# hadoop_output_format
if (step_num == self._num_steps() - 1 and self._hadoop_output_format):
args.extend(['-outputformat', self._hadoop_output_format])
return args
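# Example of what this can return for a first step (values are made up, order matches the code above):
#   ['-libjars', 'extra.jar',
#    '-D', 'mapreduce.job.maps=4',
#    '-partitioner', 'org.apache.hadoop.mapred.lib.HashPartitioner',
#    '-cmdenv', 'TZ=UTC',
#    '-inputformat', 'org.apache.hadoop.mapred.lib.NLineInputFormat']
# generic args (-libjars/-D) come first, then hadoop_extra_args, the partitioner,
# cmdenv vars, and finally the input/output formats for the first/last step.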
def _hadoop_streaming_jar_args(self, step_num):
"""The arguments that come after ``hadoop jar <streaming jar path>``
when running a Hadoop streaming job."""
args = []
# get command for each part of the job
mapper, combiner, reducer = (
self._hadoop_streaming_commands(step_num))
# set up uploading from HDFS/cloud storage to the working dir
args.extend(self._upload_args())
# if no reducer, shut off reducer tasks. This has to come before
# extra hadoop args, which could contain jar-specific args
# (e.g. -outputformat). See #1331.
#
# might want to just integrate this into _hadoop_args_for_step?
if not reducer:
args.extend(['-D', ('%s=0' % translate_jobconf(
'mapreduce.job.reduces', self.get_hadoop_version()))])
# Add the extra hadoop args here because they may include
# hadoop-specific arguments that must come before the
# job-specific args below.
args.extend(self._hadoop_args_for_step(step_num))
# set up input
for input_uri in self._step_input_uris(step_num):
args.extend(['-input', input_uri])
# set up output
args.append('-output')
args.append(self._step_output_uri(step_num))
args.append('-mapper')
args.append(mapper)
if combiner:
args.append('-combiner')
args.append(combiner)
if reducer:
args.append('-reducer')
args.append(reducer)
return args
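# Sketch of the streaming invocation these args feed into (paths are placeholders,
# not from the original source; bracketed parts are optional):
#   hadoop jar hadoop-streaming.jar \
#       <upload args> [-D mapreduce.job.reduces=0] <step args> \
#       -input hdfs:///tmp/step-0-input -output hdfs:///tmp/step-0-output \
#       -mapper '<mapper cmd>' [-combiner '<combiner cmd>'] [-reducer '<reducer cmd>']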
def _hadoop_streaming_commands(self, step_num):
return (
self._render_substep(step_num, 'mapper'),
self._render_substep(step_num, 'combiner'),
self._render_substep(step_num, 'reducer'),
)
def _hadoop_generic_args_for_step(self, step_num):
"""Arguments like -D and -libjars that apply to every Hadoop
subcommand."""
args = []
# libjars (#198)
libjar_paths = self._libjar_paths()
if libjar_paths:
args.extend(['-libjars', ','.join(libjar_paths)])
# jobconf (-D)
jobconf = self._jobconf_for_step(step_num)
for key, value in sorted(jobconf.items()):
args.extend(['-D', '%s=%s' % (key, value)])
return args
def _libjar_paths(self):
"""Paths or URIs of libjars, from Hadoop/Spark's point of view.
Override this | |
arrow keys to move.']
while self.opening_scene == True: # while opening scene is True display text and background
self.display.window.blit(Boundary.back_ground, (0,0)) # display background
self.title_text = self.fonts[0].render(self.titles[0], 1, self.color)
self.title_text2 = self.fonts[0].render(self.titles[1], 1, self.color)
self.display.window.blit(self.title_text, ((self.display.width//2) - (self.title_text.get_width()//2), 70))
self.display.window.blit(self.title_text2, ((self.display.width//2) - (self.title_text2.get_width()//2), 130))
self.body_location = 300 # established in loop so it is reset each time
for body_text in self.body:
b_t = self.fonts[1].render(body_text, 1, self.color)
self.display.window.blit(b_t, ((self.display.width//2) - (b_t.get_width()//2), self.body_location))
self.body_location += 30 # move body text down 30 at a time
self.instructions_location = 600 # established in loop so it is reset each time
for instruction in self.instructions:
instructions_text = self.fonts[1].render(instruction, 1, self.color)
self.display.window.blit(instructions_text, ((self.display.width//2) - (instructions_text.get_width()//2),
self.instructions_location))
self.instructions_location += 30
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYUP: # game will start upon release of any key
self.opening_scene = False # kick back out to main loop
if event.type == pygame.QUIT:
self.opening_scene = False
self.quit = True
pygame.quit()
def end_game(self):
'''Create Credits object and restart game upon user input
Attributes
end_music (pygame music): music played at game over screen
game_over (Credits obj): credits object with the current final score
displaying_credits (bool): control for end game loop
'''
pygame.mixer.music.stop()
self.end_music = pygame.mixer.music.load('audio/Battle-Conflict.mp3')
pygame.mixer.music.play(-1)
self.game_over = Credits(self.score.score) # create credits obj
self.game_over.write_highscores()
self.displaying_credits = True
while self.displaying_credits:
#if self.displaying_credits == False:
# break
self.display.window.blit(Boundary.back_ground, (0,0)) # display background to screen
self.game_over.display_credits(self.display.window) # print credits to screen
pygame.display.update()
pygame.time.delay(2000) # delay pygame so key pressing at end of game doesn't auto restart
for event in pygame.event.get():
if event.type == pygame.KEYUP: # reset game upon pressing and release of key
# Reset all object class attribute list to empty for new game
Asteroid.asteroid_lst[:] = []
ShooterObject.shots_queue[:] = []
Explosion.explosion_lst[:] = []
Health_PowerUp.current_powerups[:] = []
# using initial game setup commands to reset everything upon restart.
self.music = pygame.mixer.music.load('audio/Battle-SilverMoon.mp3')
pygame.mixer.music.play(-1)
self.clock = pygame.time.Clock()
self.run = True
self.display = Boundary(500, 700)
self.main_sprite = Character()
self.mothership = Mothership()
self.score = Score()
self.count = 0
self.center_frame = 0
self.left_right_frame = 0
self.next_frame = time.time()
self.most_recent_key = None
self.powerup_timer = 0
self.displaying_credits = False
if event.type == pygame.QUIT:
self.displaying_credits = False
pygame.quit()
#break
def handle_key_presses(self):
'''Move character right and left, setting character movement states and correct frames to be displayed'''
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]: # left arrow key to move left
self.main_sprite.move_left(self.display) # using display as input to set boundaries for movement.
if self.main_sprite.left == False: # only allowing access to branch if False so it won't run while holding down key
self.main_sprite.left = True # sprite is now moving left
self.main_sprite.right = False # right and center are now both False
self.main_sprite.center = False
self.left_right_frame = 0 # resetting left&right frame count. Will help display intermediate strafe states
self.most_recent_key = 'l' # setting left so intermediate strafe images used to level out spaceship
elif keys[pygame.K_RIGHT]: # right arrow key to move right
self.main_sprite.move_right(self.display) # using display as input to set boundaries for movement.
if self.main_sprite.right == False: # only allowing access to branch if False so it won't run while holding down key
self.main_sprite.right = True
self.main_sprite.left = False
self.main_sprite.center = False
self.left_right_frame = 0
self.most_recent_key = 'r' # setting right so intermediate strafe images used to level out spaceship
else:
if self.main_sprite.center == False: # once right or left keys are let go, if statement will run
self.main_sprite.center = True
self.main_sprite.right = False
self.main_sprite.left = False
self.left_right_frame = 0 # resetting upon return to center will allow us to access intermediate strafe states
def generate_shots(self):
'''Generate shots fired from spaceship at a constant rate'''
if (self.count % ShooterObject.shot_rate == 0): # every 50 loops the spaceship will generate a ShooterObject(bullet)
self.main_sprite.shoot('normal') # normal indicates the bullet type and specifies its properties upon creation.
def generate_asteroids(self):
'''Generate asteroids at a random rate'''
# call the asteroid random number generator; if the numbers match, an asteroid is generated and placed on screen
if Asteroid.ast_diff_setting[Asteroid.current_setting] == random.randint(0,Asteroid.ast_diff_setting[Asteroid.current_setting]):
self.a = Asteroid()
def generate_powerup(self):
'''Generate health and timed power ups at discrete intervals and game conditions'''
if self.count > self.powerup_health_timer: # health has its own timer and is now completely unlinked from other powerup generation
if self.mothership.health_amt != 1000: # only if the mothership has taken on some damage should powerups begin to generate
# powerup generation is a function of game health with a max generation rate of 300
if self.mothership.health_amt*2 == random.randint(0,self.mothership.health_amt*2 + 300):
self.p = Health_PowerUp()
self.powerup_health_timer = self.count + 500 # cooldown timer for power ups are set at ~8 seconds
if self.count > self.powerup_timer: # power up cooldown has expired
if TimedPowerUp.activated == False: # only allow power up generation if a powerup isn't in current use.
if self.mothership.health_amt >= 500: # havin' a good time then you should get a double XP
if 1000 == random.randint(0,1000):
TimedPowerUp('Double XP')
self.powerup_timer = self.count + 500 # setting cooldown for powerups ~8 seconds
if self.mothership.health_amt <= 500: # about t' die might need Insta-Kill
if 1000 == random.randint(0,1000):
TimedPowerUp('Insta-Kill')
self.powerup_timer = self.count + 500
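# Note on the spawn odds used above (derived from the random.randint calls):
# matching a fixed value drawn uniformly from randint(0, M) happens with
# probability 1/(M+1) per check, so a health powerup appears roughly once every
# health_amt*2 + 301 eligible frames and a timed powerup roughly once every
# 1001 eligible frames, subject to the ~8 second cooldown timers.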
def handle_collisions(self):
'''Loop through all object types on screen and determine if collisions have occurred'''
for powerup in Health_PowerUp.current_powerups:
if (powerup.x > self.main_sprite.x) and (powerup.x < self.main_sprite.x + self.main_sprite.width)\
and (powerup.y + powerup.height > self.main_sprite.y)\
and (powerup.y + powerup.height < self.main_sprite.y + self.main_sprite.height): # within boundaries of main sprite
if powerup.activated == False: # set so power up can only give mothership health once.
self.mothership.health_amt += powerup.health_add # increment mothership's health
self.mothership.update_damage() # update the mothership's damage
powerup.activated = True # activate powerup
if powerup.activated == True:
if powerup.powerup_display_timer > 25: # deactivate the powerup once its display counter exceeds 25
powerup.activated = False
Health_PowerUp.current_powerups.pop(Health_PowerUp.current_powerups.index(powerup))
del powerup # remove powerup from instance list and delete
for t_powerup in TimedPowerUp.current_powerups:
if (t_powerup.x > self.main_sprite.x) and (t_powerup.x < self.main_sprite.x + self.main_sprite.width)\
and (t_powerup.y + t_powerup.height > self.main_sprite.y)\
and (t_powerup.y + t_powerup.height < self.main_sprite.y + self.main_sprite.height): #within boundaries
if TimedPowerUp.activated == False: # only turn switch if False, this keeps actions from repeating
TimedPowerUp.activated = True
t_powerup.effect_timer = self.count # setting powerup timer to current game loop number
if TimedPowerUp.activated == True:
if self.count - t_powerup.effect_timer > t_powerup.powerup_duration: # ~10 seconds worth of powerup
TimedPowerUp.activated = False # undos all effects from activation
TimedPowerUp.current_powerups.pop(TimedPowerUp.current_powerups.index(t_powerup))
del t_powerup # remove from instance list and delete
for bullet in ShooterObject.shots_queue:
for asteroid in Asteroid.asteroid_lst:
if (bullet.start_x >= asteroid.x) and (bullet.start_x <= asteroid.x + asteroid.width)\
and (bullet.end_y <= asteroid.y + asteroid.width): # check to see if bullet is within asteroid hit box
# if within hit box, then more complex calculation to see if bullet is within radius is performed.
if ((bullet.end_y - (asteroid.y + (asteroid.width/2)))**2 + (bullet.start_x - (asteroid.x + (asteroid.width/2)))**2)**0.5 < (asteroid.width/2):
bullet.hit = True # register hit and reduce asteroid health
if (TimedPowerUp.activated == True) and (TimedPowerUp.current_option == 'Insta-Kill'): # powerup effect
asteroid.health_amt = 0 # instantly reduce asteroid health to zero.
asteroid.damage_taken += bullet.damage
else:
asteroid.health_amt -= bullet.damage # if no powerup then just reduce ast health by bullet damage
asteroid.damage_taken += bullet.damage
asteroid.update_health_bars()
if (asteroid.health_amt <= 0) or (asteroid.y + asteroid.width > 650): # check deletion conditions
if asteroid.health_amt <= 0:
if (TimedPowerUp.activated == True) and (TimedPowerUp.current_option == 'Double XP'): # powerup effect
self.score.score += (asteroid.width * 2) # double the amount of XP you receive
else:
self.score.score += asteroid.width # increment score asteroid width amt
asteroid.destruction_method = 'negative health' # method informs that xp gain should be shown on screen
elif (asteroid.y + asteroid.width > 650): # has made contact with mothership
asteroid.destruction_method = 'off screen'
self.mothership.health_amt -= asteroid.damage # update mothership health and damage
self.mothership.damage_taken += asteroid.damage
self.mothership.update_damage()
Asteroid.asteroid_lst.pop(Asteroid.asteroid_lst.index(asteroid))
asteroid.generate_explosion() # generate explosion before deleting obj
del | |
= reference_round_df.groupby('barcode_reference_dot_id')
ref_selected_df_no_duplicates = reference_round_df
for brdi, grp in barcoded_round_grouped:
barcode = np.zeros([barcode_length],dtype=np.int8)
barcode[grp.round_num.values.astype(np.int8)-1] = 1
#hamming_dist, index_gene = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
#gene= codebook_df.loc[index_gene.reshape(index_gene.shape[0]),'Gene'].tolist()
barcode = barcode.tostring()
if len(ref_selected_df_no_duplicates) != 0:
ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'raw_barcodes'] = barcode
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'decoded_gene_name'] = gene
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'hamming_distance'] = hamming_dist.flatten()[0]
#fish_counts.loc[grp.index,'barcode_reference_dot_id'] = brdi
#fish_counts.loc[grp.index,'raw_barcodes'] = barcode
#dists, index = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
all_decoded_dots_list.append(ref_selected_df_no_duplicates)
if all_decoded_dots_list:
all_decoded_dots_df = pd.concat(all_decoded_dots_list,ignore_index=False)
codebook_df = convert_str_codebook(codebook_df,'Code')
codebook_array = make_codebook_array(codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
all_barcodes = np.vstack(all_decoded_dots_df.raw_barcodes.map(lambda x: np.frombuffer(x, np.int8)).values)
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
all_decoded_dots_df.loc[:,'decoded_genes'] = genes
all_decoded_dots_df.loc[:,'hamming_distance'] = dists_arr
all_decoded_dots_df.loc[:,'number_positive_bits'] = all_barcodes.sum(axis=1)
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
else:
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return barcoded_round, all_decoded_dots_df
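# Note on the raw_barcodes encoding used above (round-trip sketch, illustrative only):
#   barcode = np.zeros(barcode_length, dtype=np.int8); barcode[[0, 2]] = 1
#   stored = barcode.tostring()                # bytes value stored in the dataframe column
#   restored = np.frombuffer(stored, np.int8)  # how the decoder reads it back
# the per-dot bit vectors are therefore serialized bytes, not lists.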
# TODO Remove all the functions below
######## -------------------------------------------------------------------
class extract_barcodes_NN():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,file_tags,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.file_tags = file_tags
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
column_names.append('barcode_reference_dot_id') # append in place; list.append returns None
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
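# In short (behaviour of barcode_nn above): dots from the reference round act as
# anchors; every dot from the other rounds that falls within
# barcodes_extraction_resolution pixels of an anchor inherits that anchor's dot_id
# as barcode_reference_dot_id, and the unmatched dots are returned in compare_df
# to be processed against the next reference round.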
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
fov = self.file_tags['fov']
channel = self.file_tags['channel']
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
# self.all_barcodes = {}
# for name, group in self.grpd:
# rounds_num = group.round_num.values
# dot_ids = group.dot_id.values
# rounds_num = rounds_num.astype(int)
# barcode = np.zeros([self.barcode_length],dtype=np.int8)
# barcode[(rounds_num-1)] += 1
# dists_arr, index_arr = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
# gene=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()[0]
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# if dists_arr[0][0] == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
# Only decode if there are dots in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # extra gene columns are appended below depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'number_positive_bits'] = barcode.sum()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'hamming_distance'] = hd
# if hd == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif hd < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif hd < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
fname = self.file_tags['experiment_fpath'] / 'tmp' / 'registered_counts' / (self.file_tags['experiment_name'] + '_' + self.file_tags['channel'] + '_decoded_fov_' + self.file_tags['fov'] + '.parquet')
self.barcoded_fov_df.to_parquet(fname,index=False)
class extract_barcodes_NN_test():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, fov, channel, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.fov = fov
self.channel = channel
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
column_names.append('barcode_reference_dot_id') # append in place; list.append returns None
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
# Only decode if there are dots in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # extra gene columns are appended below depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if | |
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_commenters_delete(self, id, nk, **kwargs):
"""
Deletes all commenters of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_delete(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_commenters_delete_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_commenters_delete_with_http_info(id, nk, **kwargs)
return data
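# Usage sketch (ids are placeholders, mirroring the docstring above): calling
#   api.portals_id_designs_nk_commenters_delete('portal-id', 'design-id')
# blocks and returns the deserialized response (None for this DELETE endpoint),
# while passing callback=some_function instead returns the request thread and
# the callback receives the response asynchronously.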
def portals_id_designs_nk_commenters_delete_with_http_info(self, id, nk, **kwargs):
"""
Deletes all commenters of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_delete_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_commenters_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_commenters_delete`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_commenters_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/commenters'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_commenters_fk_delete(self, id, nk, fk, **kwargs):
"""
Delete a related item by id for commenters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_fk_delete(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for commenters (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_commenters_fk_delete_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_commenters_fk_delete_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_commenters_fk_delete_with_http_info(self, id, nk, fk, **kwargs):
"""
Delete a related item by id for commenters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_fk_delete_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for commenters (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_commenters_fk_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_commenters_fk_delete`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_commenters_fk_delete`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_commenters_fk_delete`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/commenters/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_commenters_fk_get(self, id, nk, fk, **kwargs):
"""
Find a related item by id for commenters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_fk_get(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for commenters (required)
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_commenters_fk_get_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_commenters_fk_get_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_commenters_fk_get_with_http_info(self, id, nk, fk, **kwargs):
"""
Find a related item by id for commenters.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_commenters_fk_get_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for commenters (required)
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_commenters_fk_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_commenters_fk_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_commenters_fk_get`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_commenters_fk_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/commenters/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_commenters_fk_put(self, id, nk, fk, **kwargs):
"""
Update a related | |
= ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(set_silent_user_info_result)
set_silent_user_info_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
class get_input_history_args(object):
"""
Attributes:
- open_id
- search_key
"""
def __init__(self, open_id=None, search_key=None,):
self.open_id = open_id
self.search_key = search_key
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.open_id = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.search_key = vichele_stay_alone()
self.search_key.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_input_history_args')
if self.open_id is not None:
oprot.writeFieldBegin('open_id', TType.STRING, 1)
oprot.writeString(self.open_id.encode('utf-8') if sys.version_info[0] == 2 else self.open_id)
oprot.writeFieldEnd()
if self.search_key is not None:
oprot.writeFieldBegin('search_key', TType.STRUCT, 2)
self.search_key.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_input_history_args)
get_input_history_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'open_id', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'search_key', [vichele_stay_alone, None], None, ), # 2
)
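# Illustrative construction (assumes vichele_stay_alone, defined earlier in this
# generated module, accepts a no-argument constructor like other Thrift structs):
#   get_input_history_args(open_id='wx-open-id', search_key=vichele_stay_alone())
# serializes open_id as field 1 (STRING) and search_key as field 2 (STRUCT),
# matching the thrift_spec tuple above.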
class get_input_history_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype276, _size273) = iprot.readListBegin()
for _i277 in range(_size273):
_elem278 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.success.append(_elem278)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_input_history_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter279 in self.success:
oprot.writeString(iter279.encode('utf-8') if sys.version_info[0] == 2 else iter279)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_input_history_result)
get_input_history_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
class get_company_vichele_info_args(object):
"""
Attributes:
- ssid
- anchor
- status
- enter_date
- stuff_name
- supplier_name
- vichele_number
"""
def __init__(self, ssid=None, anchor=None, status=None, enter_date=None, stuff_name=None, supplier_name=None, vichele_number=None,):
self.ssid = ssid
self.anchor = anchor
self.status = status
self.enter_date = enter_date
self.stuff_name = stuff_name
self.supplier_name = supplier_name
self.vichele_number = vichele_number
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.ssid = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.anchor = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.status = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.enter_date = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.stuff_name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.supplier_name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.STRING:
self.vichele_number = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_company_vichele_info_args')
if self.ssid is not None:
oprot.writeFieldBegin('ssid', TType.STRING, 1)
oprot.writeString(self.ssid.encode('utf-8') if sys.version_info[0] == 2 else self.ssid)
oprot.writeFieldEnd()
if self.anchor is not None:
oprot.writeFieldBegin('anchor', TType.I64, 2)
oprot.writeI64(self.anchor)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.I64, 3)
oprot.writeI64(self.status)
oprot.writeFieldEnd()
if self.enter_date is not None:
oprot.writeFieldBegin('enter_date', TType.STRING, 4)
oprot.writeString(self.enter_date.encode('utf-8') if sys.version_info[0] == 2 else self.enter_date)
oprot.writeFieldEnd()
if self.stuff_name is not None:
oprot.writeFieldBegin('stuff_name', TType.STRING, 5)
oprot.writeString(self.stuff_name.encode('utf-8') if sys.version_info[0] == 2 else self.stuff_name)
oprot.writeFieldEnd()
if self.supplier_name is not None:
oprot.writeFieldBegin('supplier_name', TType.STRING, 6)
oprot.writeString(self.supplier_name.encode('utf-8') if sys.version_info[0] == 2 else self.supplier_name)
oprot.writeFieldEnd()
if self.vichele_number is not None:
oprot.writeFieldBegin('vichele_number', TType.STRING, 7)
oprot.writeString(self.vichele_number.encode('utf-8') if sys.version_info[0] == 2 else self.vichele_number)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_company_vichele_info_args)
get_company_vichele_info_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'ssid', 'UTF8', None, ), # 1
(2, TType.I64, 'anchor', None, None, ), # 2
(3, TType.I64, 'status', None, None, ), # 3
(4, TType.STRING, 'enter_date', 'UTF8', None, ), # 4
(5, TType.STRING, 'stuff_name', 'UTF8', None, ), # 5
(6, TType.STRING, 'supplier_name', 'UTF8', None, ), # 6
(7, TType.STRING, 'vichele_number', 'UTF8', None, ), # 7
)
class get_company_vichele_info_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype283, _size280) = iprot.readListBegin()
for _i284 in range(_size280):
_elem285 = vichele_stay_alone()
_elem285.read(iprot)
self.success.append(_elem285)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_company_vichele_info_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter286 in self.success:
iter286.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_company_vichele_info_result)
get_company_vichele_info_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [vichele_stay_alone, None], False), None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
class confirm_vichele_args(object):
"""
Attributes:
- ssid
- info
- company_for_select
- all_select
- enter_date
- stuff_name
- supplier_name
"""
def __init__(self, ssid=None, info=None, company_for_select=None, all_select=None, enter_date=None, stuff_name=None, supplier_name=None,):
self.ssid = ssid
self.info = info
self.company_for_select = company_for_select
self.all_select = all_select
self.enter_date = enter_date
self.stuff_name = stuff_name
self.supplier_name = supplier_name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.ssid = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.info = []
(_etype290, _size287) = iprot.readListBegin()
for _i291 in range(_size287):
_elem292 = vichele_stay_alone()
_elem292.read(iprot)
self.info.append(_elem292)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.company_for_select = []
(_etype296, _size293) = iprot.readListBegin()
for _i297 in range(_size293):
_elem298 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
self.company_for_select.append(_elem298)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.all_select = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 5:
                if ftype == TType.STRING:
import os
import requests
MB_PASSWORD = os.getenv("MB_PASSWORD")
MB_USERNAME = os.getenv("MB_USERNAME")
MB_DOMAIN = os.getenv("MB_DOMAIN")
MB_BASIC_AUTH_USERNAME = os.getenv("MB_BASIC_AUTH_USERNAME")
MB_BASIC_AUTH_PASSWORD = os.getenv("MB_BASIC_AUTH_PASSWORD")
class Metabase_API():
    def __init__(self, domain=MB_DOMAIN, email=MB_USERNAME, password=MB_PASSWORD, basic_auth=(MB_BASIC_AUTH_USERNAME, MB_BASIC_AUTH_PASSWORD)):
self.domain = domain.rstrip('/')
self.email = email
self.password = password
self.session_id = None
self.header = None
self.auth = basic_auth if any(basic_auth) else None
self.authenticate()
def authenticate(self):
"""Get a Session ID"""
conn_header = {
'username':self.email,
            'password':self.password
}
res = requests.post(self.domain + '/api/session', json=conn_header, auth=self.auth)
if not res.ok:
raise Exception(res)
self.session_id = res.json()['id']
self.header = {'X-Metabase-Session':self.session_id}
def validate_session(self):
"""Get a new session ID if the previous one has expired"""
res = requests.get(self.domain + '/api/user/current', headers=self.header, auth=self.auth)
if res.ok: # 200
return True
elif res.status_code == 401: # unauthorized
return self.authenticate()
else:
raise Exception(res)
##################################################################
######################### REST Methods ###########################
##################################################################
def get(self, endpoint, *args, **kwargs):
self.validate_session()
res = requests.get(self.domain + endpoint, headers=self.header, **kwargs, auth=self.auth)
if 'raw' in args:
return res
else:
return res.json() if res.ok else False
def post(self, endpoint, *args, **kwargs):
self.validate_session()
res = requests.post(self.domain + endpoint, headers=self.header, **kwargs, auth=self.auth)
if 'raw' in args:
return res
else:
return res.json() if res.ok else False
def put(self, endpoint, *args, **kwargs):
"""Used for updating objects (cards, dashboards, ...)"""
self.validate_session()
res = requests.put(self.domain + endpoint, headers=self.header, **kwargs, auth=self.auth)
if 'raw' in args:
return res
else:
return res.status_code
def delete(self, endpoint, *args, **kwargs):
self.validate_session()
res = requests.delete(self.domain + endpoint, headers=self.header, **kwargs, auth=self.auth)
if 'raw' in args:
return res
else:
return res.status_code
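    # Note (added for exposition): get() and post() return the parsed JSON body on
    # success and False on failure, while put() and delete() return the HTTP status
    # code. The helper functions below rely on the truthiness of these return values.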
###############################################################
##################### Helper Functions ########################
###############################################################
def get_item_info(self, item_type
, item_id=None, item_name=None
, collection_id=None, collection_name=None
, params=None):
'''
Return the info for the given item.
Use 'params' for providing arguments. E.g. to include tables in the result for databases, use: params={'include':'tables'}
'''
assert item_type in ['database', 'table', 'card', 'collection', 'dashboard', 'pulse', 'segment']
if params:
assert type(params) == dict
if not item_id:
if not item_name:
raise ValueError('Either the name or id of the {} must be provided.'.format(item_type))
item_id = self.get_item_id(item_type, item_name, collection_id=collection_id, collection_name=collection_name)
res = self.get("/api/{}/{}".format(item_type, item_id), params=params)
if res:
return res
else:
raise ValueError('There is no {} with the id "{}"'.format(item_type, item_id))
def get_item_name(self, item_type, item_id):
assert item_type in ['database', 'table', 'card', 'collection', 'dashboard', 'pulse', 'segment']
res = self.get("/api/{}/{}".format(item_type, item_id))
if res:
return res['name']
else:
raise ValueError('There is no {} with the id "{}"'.format(item_type, item_id))
def get_item_id(self, item_type, item_name, collection_id=None, collection_name=None, db_id=None, db_name=None, table_id=None):
assert item_type in ['database', 'table', 'card', 'collection', 'dashboard', 'pulse', 'segment']
if item_type in ['card', 'dashboard', 'pulse']:
if not collection_id:
if not collection_name:
# Collection name/id is not provided. Searching in all collections
item_IDs = [ i['id'] for i in self.get("/api/{}/".format(item_type)) if i['name'] == item_name
and i['archived'] == False ]
else:
collection_id = self.get_item_id('collection', collection_name) if collection_name != 'root' else None
item_IDs = [ i['id'] for i in self.get("/api/{}/".format(item_type)) if i['name'] == item_name
and i['collection_id'] == collection_id
and i['archived'] == False ]
else:
collection_name = self.get_item_name('collection', collection_id)
item_IDs = [ i['id'] for i in self.get("/api/{}/".format(item_type)) if i['name'] == item_name
and i['collection_id'] == collection_id
and i['archived'] == False ]
if len(item_IDs) > 1:
if not collection_name:
raise ValueError('There is more than one {} with the name "{}".\n\
Provide collection id/name to limit the search space'.format(item_type, item_name))
raise ValueError('There is more than one {} with the name "{}" in the collection "{}"'
.format(item_type, item_name, collection_name))
if len(item_IDs) == 0:
if not collection_name:
raise ValueError('There is no {} with the name "{}"'.format(item_type, item_name))
raise ValueError('There is no item with the name "{}" in the collection "{}"'
.format(item_name, collection_name))
return item_IDs[0]
if item_type == 'collection':
collection_IDs = [ i['id'] for i in self.get("/api/collection/") if i['name'] == item_name ]
if len(collection_IDs) > 1:
raise ValueError('There is more than one collection with the name "{}"'.format(item_name))
if len(collection_IDs) == 0:
raise ValueError('There is no collection with the name "{}"'.format(item_name))
return collection_IDs[0]
if item_type == 'database':
res = self.get("/api/database/")
if type(res) == dict: # in Metabase version *.40.0 the format of the returned result for this endpoint changed
res = res['data']
db_IDs = [ i['id'] for i in res if i['name'] == item_name ]
if len(db_IDs) > 1:
raise ValueError('There is more than one DB with the name "{}"'.format(item_name))
if len(db_IDs) == 0:
raise ValueError('There is no DB with the name "{}"'.format(item_name))
return db_IDs[0]
if item_type == 'table':
tables = self.get("/api/table/")
if db_id:
table_IDs = [ i['id'] for i in tables if i['name'] == item_name and i['db']['id'] == db_id ]
elif db_name:
table_IDs = [ i['id'] for i in tables if i['name'] == item_name and i['db']['name'] == db_name ]
else:
table_IDs = [ i['id'] for i in tables if i['name'] == item_name ]
if len(table_IDs) > 1:
raise ValueError('There is more than one table with the name {}. Provide db id/name.'.format(item_name))
if len(table_IDs) == 0:
raise ValueError('There is no table with the name "{}" (in the provided db, if any)'.format(item_name))
return table_IDs[0]
if item_type == 'segment':
segment_IDs = [ i['id'] for i in self.get("/api/segment/") if i['name'] == item_name
and (not table_id or i['table_id'] == table_id) ]
if len(segment_IDs) > 1:
raise ValueError('There is more than one segment with the name "{}"'.format(item_name))
if len(segment_IDs) == 0:
raise ValueError('There is no segment with the name "{}"'.format(item_name))
return segment_IDs[0]
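    # Example (illustrative, not part of the original source):
    #   table_id = self.get_item_id('table', 'orders', db_name='warehouse')
    # The collection/db/table hints are only needed to disambiguate duplicate names.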
def get_collection_id(self, collection_name):
import warnings
warnings.warn("The function get_collection_id will be removed in the next version. Use get_item_id function instead.", DeprecationWarning)
collection_IDs = [ i['id'] for i in self.get("/api/collection/") if i['name'] == collection_name ]
if len(collection_IDs) > 1:
raise ValueError('There is more than one collection with the name "{}"'.format(collection_name))
if len(collection_IDs) == 0:
raise ValueError('There is no collection with the name "{}"'.format(collection_name))
return collection_IDs[0]
def get_db_id(self, db_name):
import warnings
warnings.warn("The function get_db_id will be removed in the next version. Use get_item_id function instead.", DeprecationWarning)
res = self.get("/api/database/")
if type(res) == dict: # in Metabase version *.40.0 the format of the returned result for this endpoint changed
res = res['data']
db_IDs = [ i['id'] for i in res if i['name'] == db_name ]
if len(db_IDs) > 1:
raise ValueError('There is more than one DB with the name "{}"'.format(db_name))
if len(db_IDs) == 0:
raise ValueError('There is no DB with the name "{}"'.format(db_name))
return db_IDs[0]
def get_table_id(self, table_name, db_name=None, db_id=None):
import warnings
warnings.warn("The function get_table_id will be removed in the next version. Use get_item_id function instead.", DeprecationWarning)
tables = self.get("/api/table/")
if db_id:
table_IDs = [ i['id'] for i in tables if i['name'] == table_name and i['db']['id'] == db_id ]
elif db_name:
table_IDs = [ i['id'] for i in tables if i['name'] == table_name and i['db']['name'] == db_name ]
else:
table_IDs = [ i['id'] for i in tables if i['name'] == table_name ]
if len(table_IDs) > 1:
raise ValueError('There is more than one table with the name {}. Provide db id/name.'.format(table_name))
if len(table_IDs) == 0:
raise ValueError('There is no table with the name "{}" (in the provided db, if any)'.format(table_name))
return table_IDs[0]
def get_segment_id(self, segment_name, table_id=None):
import warnings
warnings.warn("The function get_segment_id will be removed in the next version. Use get_item_id function instead.", DeprecationWarning)
segment_IDs = [ i['id'] for i in self.get("/api/segment/") if i['name'] == segment_name
and (not table_id or i['table_id'] == table_id) ]
if len(segment_IDs) > 1:
raise ValueError('There is more than one segment with the name "{}"'.format(segment_name))
if len(segment_IDs) == 0:
raise ValueError('There is no segment with the name "{}"'.format(segment_name))
return segment_IDs[0]
def get_db_id_from_table_id(self, table_id):
tables = [ i['db_id'] for i in self.get("/api/table/") if i['id'] == table_id ]
if len(tables) == 0:
raise ValueError('There is no DB containing the table with the ID "{}"'.format(table_id))
return tables[0]
def get_db_info(self, db_name=None, db_id=None, params=None):
'''
Return Database info. Use 'params' for providing arguments.
For example to include tables in the result, use: params={'include':'tables'}
'''
import warnings
warnings.warn("The function get_db_info will be removed in the next version. Use get_item_info function instead.", DeprecationWarning)
if params:
assert type(params) == dict
if not db_id:
if not db_name:
raise ValueError('Either the name or id of the DB needs to be provided.')
db_id = self.get_item_id('database', db_name)
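# Illustrative usage sketch (added for exposition; the original module is truncated
# above). Credentials come from the MB_* environment variables, and the item names
# below are placeholders.
def example_metabase_usage():
    mb = Metabase_API()                                   # authenticates against /api/session
    databases = mb.get("/api/database/")                  # raw REST wrapper
    card = mb.get_item_info('card', item_name='My Card')  # name-based lookup helper
    return databases, card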
"""
FindRepoTask
--------------------------------------------------
Run the tool to find repos that should be taken down.
1. Repos are grouped by owner when one owner has multiple repos in violation.
2. Newly found repos are tagged as "new".
"""
from .BaseTask import BaseTask
from takedown.client.GitHub import GitHubClient
import sys
import requests
import datetime
class FindRepoTask(BaseTask):
def __init__(self, **config):
super().__init__(**config)
self.__dict__.update(config)
self.client = GitHubClient()
self.__token = ""
self.__is_authenticated = False
self.search_query = ""
self.previous_records = None
        # cache user_info to save GitHub request rate limit and bandwidth
self.cached_user_info = {}
def prepare(self, token: str, search_query: str, previous_records: dict = None):
"""
prepare the task
:param token: input github token
:param search_query: input search_query
:param previous_records: previous records of searched repos
:return: self instance
"""
self.client.authenticate(token)
self.__token = token
self.__is_authenticated = True
self.search_query = search_query
self.previous_records = previous_records
return self
def __pre_check__(self, ignore_warning: bool = False):
"""
run before execute
:return: false if check failed
"""
if self.search_query == "":
return False
if len(self.search_query) < 5 and not ignore_warning:
confirm = None
while not confirm:
confirm = input("The length of search query `{}` is too short that will produce massive search results."
" Are you sure to proceed? [y/n]\n".format(self.search_query))
if confirm.lower() not in ['y', 'n']:
confirm = None
print("Please enter 'y' or 'n': ")
elif confirm.lower() == 'n':
return False
if not self.__is_authenticated:
print("No token provided for GitHub client.", file=sys.stderr)
return False
return True
def execute_search_by_code(self, ignore_warning: bool = False, chain: bool = False):
"""
search by code
:param chain: true if used in multiple targets
:param ignore_warning:
:return:
"""
# pre-check
if not self.__pre_check__(ignore_warning):
return None
# try to fire one request
print("Start searching for code...")
first_result = self.client.search(self.search_query, "code", )
if not first_result:
print("An error occurs, abort program", file=sys.stderr)
return None
else:
print("Results retrieved from GitHub. Page: 1.")
if first_result.total > 500 and not ignore_warning:
confirm = None
while not confirm:
confirm = input("The number of search results is {}. It is so large that you may narrow search queries."
" The max retrievable number is 1000. Are you sure to proceed? [y/n]\n"
.format(first_result.total))
if confirm.lower() not in ['y', 'n']:
confirm = None
print("Please enter 'y' or 'n': ")
elif confirm.lower() == 'n':
return None
fields_filtered_results = []
code_search_result = first_result
total = first_result.total
page = 2 # page 1 has been requested
while total > (page - 1) * 100 and page <= 10:
fields_filtered_results = [*code_search_result.generate_list([
"owner__url", "repo__name", "repo__html_url",
]), *fields_filtered_results]
code_search_result = self.client.search(self.search_query, "code", page=page)
if not code_search_result:
print("Error in search with GitHub rest APIs", file=sys.stderr)
return None
else:
print("Results retrieved from GitHub. Page: {}.".format(page))
page += 1
fields_filtered_results = [*code_search_result.generate_list([
"owner__url", "owner__html_url", "repo__name", "repo__html_url",
]), *fields_filtered_results]
print("Retrieving additional information of users...")
processed_results = []
# cache repeated user info to save request rate
cached_user_info = self.cached_user_info
# cache repo html url to ensure each result is unique after processing
repo_set = set()
# process result by adding user info
for result in fields_filtered_results:
if result["repo__html_url"] not in repo_set:
res = cached_user_info.get(result["owner__url"], None)
if not res:
res = requests.get(result["owner__url"], headers={
'user-agent': 'python',
'Authorization': "token {}".format(self.__token)
}).json()
cached_user_info[result["owner__url"]] = res
processed_results.append({
**result,
"owner__email": res.get("email", None),
"owner__name": res.get("name", None),
"owner__username": res.get("login", None),
"owner__html_url": res.get("html_url", None)
})
repo_set.add(result["repo__html_url"])
print("Processing results...")
final_result_dict = {}
for result in processed_results:
if result["owner__username"] in final_result_dict:
repos = final_result_dict[result["owner__username"]]["repos"]
                # if repo already exists
if result["repo__name"] in repos:
# update history
repos[result["repo__name"]]["history"].append(
{
"date": repos[result["repo__name"]]["date"],
"status": repos[result["repo__name"]]["status"]
}
)
repos[result["repo__name"]]["status"] = "Redetected"
repos[result["repo__name"]]["date"] = str(datetime.datetime.now())
else:
repos[result["repo__name"]] = {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
elif self.previous_records and result["owner__username"] in self.previous_records:
previous_record = self.previous_records[result["owner__username"]]
repos = previous_record["repos"]
                # if repo already exists
if result["repo__name"] in repos:
repos[result["repo__name"]]["history"].append(
{
"date": repos[result["repo__name"]]["date"],
"status": repos[result["repo__name"]]["status"]
}
)
repos[result["repo__name"]]["status"] = "Redetected"
repos[result["repo__name"]]["date"] = str(datetime.datetime.now())
else:
repos[result["repo__name"]] = {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
final_result_dict[result["owner__username"]] = previous_record
else:
final_result_dict[result["owner__username"]] = {
"owner__username": result["owner__username"],
"owner__name": result["owner__name"],
"owner__email": [result["owner__email"]],
"owner__html_url": result["owner__html_url"],
"repos": {
result["repo__name"]: {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
}
}
if chain:
return final_result_dict
final_result = {
"results": []
}
for user in final_result_dict.values():
repos = user.pop("repos")
final_result["results"].append(
{
**user,
"repos": [
{**repo_info} for repo_info in repos.values()
]
}
)
return final_result
def execute_search_by_repo(self, ignore_warning: bool = False, chain: bool = False):
"""
        search by repo
        :param chain: true if used across multiple targets
        :param ignore_warning:
:return:
"""
# pre-check
if not self.__pre_check__(ignore_warning):
return None
# try to fire one request
print("Start searching for repo...")
first_result = self.client.search(self.search_query, "repo", )
if not first_result:
print("An error occurs, abort program", file=sys.stderr)
return None
else:
print("Results retrieved from GitHub. Page: 1.")
if first_result.total > 500 and not ignore_warning:
confirm = None
while not confirm:
confirm = input("The number of search results is {}. It is so large that you may narrow search queries."
" The max retrievable number is 1000. Are you sure to proceed? [y/n]\n"
.format(first_result.total))
if confirm.lower() not in ['y', 'n']:
confirm = None
print("Please enter 'y' or 'n': ")
elif confirm.lower() == 'n':
return None
fields_filtered_results = []
code_search_result = first_result
total = first_result.total
page = 2 # page 1 has been requested
while total > (page - 1) * 100 and page <= 10:
fields_filtered_results = [*code_search_result.generate_list([
"owner__url", "repo__name", "repo__html_url",
]), *fields_filtered_results]
code_search_result = self.client.search(self.search_query, "repo", page=page)
if not code_search_result:
print("Error in search with GitHub rest APIs", file=sys.stderr)
return None
else:
print("Results retrieved from GitHub. Page: {}.".format(page))
page += 1
fields_filtered_results = [*code_search_result.generate_list([
"owner__url", "owner__html_url", "repo__name", "repo__html_url",
]), *fields_filtered_results]
print("Retrieving additional information of users...")
processed_results = []
# cache repeated user info to save request rate
        # repo results returned by GitHub are already unique
cached_user_info = self.cached_user_info
# cache repo html url to ensure each result is unique after processing
for result in fields_filtered_results:
res = cached_user_info.get(result["owner__url"], None)
if not res:
res = requests.get(result["owner__url"], headers={
'user-agent': 'python',
'Authorization': "token {}".format(self.__token)
}).json()
cached_user_info[result["owner__url"]] = res
processed_results.append({
**result,
"owner__email": res.get("email", None),
"owner__name": res.get("name", None),
"owner__username": res.get("login", None),
"owner__html_url": res.get("html_url", None)
})
print("Processing results...")
final_result_dict = {}
for result in processed_results:
if result["owner__username"] in final_result_dict:
repos = final_result_dict[result["owner__username"]]["repos"]
                # if repo already exists
if result["repo__name"] in repos:
repos[result["repo__name"]]["history"].append(
{
"date": repos[result["repo__name"]]["date"],
"status": repos[result["repo__name"]]["status"]
}
)
repos[result["repo__name"]]["status"] = "Redetected"
repos[result["repo__name"]]["date"] = str(datetime.datetime.now())
else:
repos[result["repo__name"]] = {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
elif self.previous_records and result["owner__username"] in self.previous_records:
previous_record = self.previous_records[result["owner__username"]]
repos = previous_record["repos"]
                # if repo already exists
if result["repo__name"] in repos:
repos[result["repo__name"]]["history"].append(
{
"date": repos[result["repo__name"]]["date"],
"status": repos[result["repo__name"]]["status"]
}
)
repos[result["repo__name"]]["status"] = "Redetected"
repos[result["repo__name"]]["date"] = str(datetime.datetime.now())
else:
repos[result["repo__name"]] = {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
final_result_dict[result["owner__username"]] = previous_record
else:
final_result_dict[result["owner__username"]] = {
"owner__username": result["owner__username"],
"owner__name": result["owner__name"],
"owner__email": [result["owner__email"]],
"owner__html_url": result["owner__html_url"],
"repos": {
result["repo__name"]: {
"repo__name": result["repo__name"],
"repo__html_url": result["repo__html_url"],
"status": "New",
"date": str(datetime.datetime.now()),
"history": []
}
}
}
if chain:
return final_result_dict
final_result = {
"results": []
}
for user in final_result_dict.values():
repos = user.pop("repos")
final_result["results"].append(
{
**user,
"repos": [
{**repo_info} for repo_info in repos.values()
]
}
)
return final_result
def merge(self, results: list) -> dict:
"""
merge intermediate results
:param results:
:return:
"""
# merged results
processed_results = []
        # map used to look up <owner_username : index_in_processed_array>
cache_user = {}
# set that used to store visited repos
cache_set = set()
for result in results:
for record in result.keys():
curr = result[record]
if record in cache_user:
processed_result = processed_results[cache_user[record]]
for repo_name in curr["repos"].keys():
if "{}/{}".format(record, repo_name) not in cache_set:
processed_result["repos"].append(
curr["repos"][repo_name]
)
cache_set.add("{}/{}".format(record, repo_name))
else:
cache_user[record] = len(processed_results)
repos = curr.get("repos")
processed_results.append(
{
**curr,
"repos": [
{**repo_info} for repo_info in repos.values()
]
}
)
for repo in repos.keys():
cache_set.add("{}/{}".format(record, repo))
return {"results": processed_results}
def execute(self, targets=None, ignore_warning: bool = False, chain: bool = False):
"""
general execution function
:param targets: "repo" or "code" or ["repo", "code"], by default "code"
:param ignore_warning:
:param chain:
:return:
"""
if not targets:
return self.execute_search_by_code(ignore_warning=ignore_warning, chain=chain)
elif isinstance(targets, list):
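# Illustrative usage sketch (added for exposition; the original module is truncated
# above). Assumes BaseTask accepts an empty config; the token and search query are
# placeholders.
def example_find_repo_usage():
    task = FindRepoTask()
    task.prepare(token="<github-token>", search_query="some leaked code marker")
    report = task.execute_search_by_code(ignore_warning=True)
    if report:
        for owner in report["results"]:
            print(owner["owner__username"], [r["repo__name"] for r in owner["repos"]])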
# coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
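# Note (added for exposition): werkzeug.contrib was removed in Werkzeug 1.0, so this
# AtomFeed import only works with werkzeug < 1.0 or with a vendored replacement.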
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def url_external(endpoint, **kwargs):
url = url_for(endpoint, **kwargs)
return urljoin(request.url_root, url)
class RetryableError(Exception):
"""Erro recuperável sem que seja necessário modificar o estado dos dados
na parte cliente, e.g., timeouts, erros advindos de particionamento de rede
etc.
"""
class NonRetryableError(Exception):
"""Erro do qual não pode ser recuperado sem modificar o estado dos dados
na parte cliente, e.g., recurso solicitado não exite, URI inválida etc.
"""
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
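# Illustrative sketch (added for exposition, not part of the original module): how a
# caller might retry fetch_data on RetryableError. The attempt count and wait time
# are arbitrary example values.
def fetch_data_with_retries(url: str, attempts: int = 3, wait: float = 0.5) -> bytes:
    import time
    last_exc = None
    for _ in range(attempts):
        try:
            return fetch_data(url)
        except RetryableError as exc:
            last_exc = exc
            time.sleep(wait)
    raise last_exc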
@main.before_app_request
def add_collection_to_g():
if not hasattr(g, 'collection'):
try:
collection = controllers.get_current_collection()
setattr(g, 'collection', collection)
except Exception:
            # TODO: decide what to do here
setattr(g, 'collection', {})
@main.after_request
def add_header(response):
response.headers['x-content-type-options'] = 'nosniff'
return response
@main.after_request
def add_language_code(response):
language = session.get('lang', get_locale())
response.set_cookie('language', language)
return response
@main.before_app_request
def add_forms_to_g():
setattr(g, 'email_share', forms.EmailShareForm())
setattr(g, 'email_contact', forms.ContactForm())
setattr(g, 'error', forms.ErrorForm())
@main.before_app_request
def add_scielo_org_config_to_g():
language = session.get('lang', get_locale())
scielo_org_links = {
key: url[language]
for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items()
}
setattr(g, 'scielo_org', scielo_org_links)
@babel.localeselector
def get_locale():
langs = current_app.config.get('LANGUAGES')
lang_from_headers = request.accept_languages.best_match(list(langs.keys()))
if 'lang' not in list(session.keys()):
session['lang'] = lang_from_headers
if not lang_from_headers and not session['lang']:
        # If the language cannot be detected and there is no 'lang' key in the
        # session, fall back to the default locale.
session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE')
return session['lang']
@main.route('/set_locale/<string:lang_code>/')
def set_locale(lang_code):
langs = current_app.config.get('LANGUAGES')
if lang_code not in list(langs.keys()):
abort(400, _('Código de idioma inválido'))
referrer = request.referrer
hash = request.args.get('hash')
if hash:
referrer += "#" + hash
    # store the language code in the session
session['lang'] = lang_code
return redirect(referrer)
def get_lang_from_session():
"""
    Try to return the language stored in the session; if that is not possible,
    return BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
@main.route('/')
@cache.cached(key_prefix=cache_key_with_lang)
def index():
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
tweets = controllers.get_collection_tweets()
press_releases = controllers.get_press_releases({'language': language})
urls = {
'downloads': '{0}/w/accesses?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'references': '{0}/w/publication/size?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'other': '{0}/?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION'])
}
if (
g.collection is not None
and isinstance(g.collection, Collection)
and g.collection.metrics is not None
and current_app.config['USE_HOME_METRICS']
):
g.collection.metrics.total_journal = Journal.objects.filter(
is_public=True, current_status="current"
).count()
g.collection.metrics.total_article = Article.objects.filter(
is_public=True
).count()
context = {
'news': news,
'urls': urls,
'tweets': tweets,
'press_releases': press_releases,
}
return render_template("collection/index.html", **context)
# ##################################Collection###################################
@main.route('/journals/alpha')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list():
allowed_filters = ["current", "no-current", ""]
query_filter = request.args.get("status", "")
    if query_filter not in allowed_filters:
query_filter = ""
journals_list = [
controllers.get_journal_json_data(journal)
for journal in controllers.get_journals(query_filter=query_filter)
]
return render_template("collection/list_journal.html",
**{'journals_list': journals_list, 'query_filter': query_filter})
@main.route("/journals/thematic")
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_thematic():
allowed_query_filters = ["current", "no-current", ""]
allowed_thematic_filters = ["areas", "wos", "publisher"]
thematic_table = {
"areas": "study_areas",
"wos": "subject_categories",
"publisher": "publisher_name",
}
query_filter = request.args.get("status", "")
title_query = request.args.get("query", "")
thematic_filter = request.args.get("filter", "areas")
    if query_filter not in allowed_query_filters:
query_filter = ""
    if thematic_filter not in allowed_thematic_filters:
thematic_filter = "areas"
lang = get_lang_from_session()[:2].lower()
objects = controllers.get_journals_grouped_by(
thematic_table[thematic_filter],
title_query,
query_filter=query_filter,
lang=lang,
)
return render_template(
"collection/list_thematic.html",
**{"objects": objects, "query_filter": query_filter, "filter": thematic_filter}
)
@main.route('/journals/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_feed():
language = session.get('lang', get_locale())
collection = controllers.get_current_collection()
title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção'))
subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name)
feed = AtomFeed(title,
subtitle=subtitle,
feed_url=request.url, url=request.url_root)
journals = controllers.get_journals_paginated(
title_query='', page=1, order_by='-created', per_page=10)
if not journals.items:
feed.add('Nenhum periódico encontrado',
url=request.url,
updated=datetime.now())
for journal in journals.items:
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = []
if last_issue:
articles = controllers.get_articles_by_iid(last_issue.iid,
is_public=True)
result_dict = OrderedDict()
for article in articles:
section = article.get_section_by_lang(language[:2])
result_dict.setdefault(section, [])
result_dict[section].append(article)
context = {
'journal': journal,
'articles': result_dict,
'language': language,
'last_issue': last_issue
}
feed.add(journal.title,
render_template("collection/list_feed_content.html", **context),
content_type='html',
author=journal.publisher_name,
url=url_external('main.journal_detail', url_seg=journal.url_segment),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/about/", methods=['GET'])
@main.route('/about/<string:slug_name>', methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def about_collection(slug_name=None):
language = session.get('lang', get_locale())
context = {}
page = None
if slug_name:
        # a specific page was requested
page = controllers.get_page_by_slug_name(slug_name, language)
if not page:
abort(404, _('Página não encontrada'))
context['page'] = page
else:
        # no page slug given, so list all pages
pages = controllers.get_pages_by_lang(language)
context['pages'] = pages
return render_template("collection/about.html", **context)
# ###################################Journal#####################################
@main.route('/scielo.php/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy():
script_php = request.args.get('script', None)
pid = request.args.get('pid', None)
tlng = request.args.get('tlng', None)
allowed_scripts = [
'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf'
]
if (script_php is not None) and (script_php in allowed_scripts) and not pid:
        # a valid script was passed without a pid: bad request
        abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
elif script_php and pid:
if script_php == 'sci_serial':
# pid = issn
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.journal_detail',
url_seg=journal.url_segment), code=301)
elif script_php == 'sci_issuetoc':
issue = controllers.get_issue_by_pid(pid)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
if issue.url_segment and "ahead" in issue.url_segment:
return redirect(
                    url_for('main.aop_toc', url_seg=issue.journal.url_segment), code=301)
return redirect(
url_for(
"main.issue_toc",
url_seg=issue.journal.url_segment,
url_seg_issue=issue.url_segment),
301
)
elif script_php == 'sci_arttext' or script_php == 'sci_abstract':
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
            # 'abstract' or None (not False, because False would be converted to the string 'False')
part = (script_php == 'sci_abstract' and 'abstract') or None
if tlng not in article.languages:
tlng = article.original_language
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
part=part,
lang=tlng),
code=301)
elif script_php == 'sci_issues':
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.issue_grid',
url_seg=journal.url_segment), 301)
elif script_php == 'sci_pdf':
            # access the article PDF:
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
),
code=301
)
else:
            abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
else:
return redirect('/')
@main.route('/<string:journal_seg>')
@main.route('/journal/<string:journal_seg>')
def journal_detail_legacy_url(journal_seg):
return redirect(url_for('main.journal_detail',
url_seg=journal_seg), code=301)
@main.route('/j/<string:url_seg>/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_detail(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
    # TODO: restrict this to news related to the journal only
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
# Press releases
press_releases = controllers.get_press_releases({
'journal': journal,
'language': language})
    # Section list
    # Always keep English for the sections on the journal home page
if journal.last_issue and journal.current_status == "current":
sections = [section for section in journal.last_issue.sections if section.language == 'en']
recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True)
else:
sections = []
recent_articles = []
latest_issue = journal.last_issue
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = ''
journal_metrics = controllers.get_journal_metrics(journal)
context = {
'journal': journal,
'press_releases': press_releases,
'recent_articles': recent_articles,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
        # the first item in the list is the latest issue.
        # conditional to check whether issues contains items
'last_issue': latest_issue,
'latest_issue_legend': latest_issue_legend,
'sections': sections if sections else None,
'news': news,
'journal_metrics': journal_metrics
}
return render_template("journal/detail.html", **context)
@main.route('/journal/<string:url_seg>/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_feed(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True)
feed = AtomFeed(journal.title,
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(last_issue))
feed_language = session.get('lang', get_locale())
feed_language = feed_language[:2].lower()
for article in articles:
        # ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
id=article.doi or article.pid,
colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
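    # Note (added for exposition, not in the original pandas source): Series.agg and
    # DataFrame.agg route dict and list arguments through _aggregate above, e.g.
    #   df.agg({'a': 'sum', 'b': ['min', 'max']})
    # builds each piece via _agg_1dim/_agg_2dim and then concatenates the results.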
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacement attributes
"""
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define an builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Series and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
DataFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Series or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Series or Index. If a future version of pandas adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, copy=False):
"""
A NumPy ndarray representing the values in this Series or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
known differences found: 2"
in stdout.getvalue()
)
assert (
"Number of columns compared with all values completely equal: 0"
in stdout.getvalue()
)
def test_column_comparison_outputs_number_of_columns_with_differences_for_custom_known_diffs(
comparison_kd2,
):
stdout = io.StringIO()
comparison_kd2.report(file=stdout)
assert "****** Column Comparison ******" in stdout.getvalue()
assert (
"Number of columns compared with unexpected differences in some values: 2"
in stdout.getvalue()
)
assert (
"Number of columns compared with all values equal but known differences found: 1"
in stdout.getvalue()
)
assert (
"Number of columns compared with all values completely equal: 0"
in stdout.getvalue()
)
def test_columns_with_unequal_values_show_mismatch_counts(comparison1):
stdout = io.StringIO()
comparison1.report(file=stdout)
assert "****** Columns with Unequal Values ******" in stdout.getvalue()
assert re.search(
r"""Base\s*Column\s*Name \s+ Compare\s*Column\s*Name \s+ Base\s*Dtype \s+ Compare\sDtype \s*
\#\sMatches \s* \#\sMismatches \n
-+ \s+ -+ \s+ -+ \s+ -+ \s+ -+ \s+ -+""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""dollar_amt \s+ dollar_amt \s+ bigint \s+ double \s+ 2 \s+ 2""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""float_fld \s+ float_fld \s+ double \s+ double \s+ 1 \s+ 3""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""name \s+ name \s+ string \s+ string \s+ 3 \s+ 1""", stdout.getvalue(), re.X
)
def test_columns_with_different_names_with_unequal_values_show_mismatch_counts(
comparison3,
):
stdout = io.StringIO()
comparison3.report(file=stdout)
assert "****** Columns with Unequal Values ******" in stdout.getvalue()
assert re.search(
r"""Base\s*Column\s*Name \s+ Compare\s*Column\s*Name \s+ Base\s*Dtype \s+ Compare\sDtype \s*
\#\sMatches \s* \#\sMismatches \n
-+ \s+ -+ \s+ -+ \s+ -+ \s+ -+ \s+ -+""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""dollar_amt \s+ dollar_amount \s+ bigint \s+ double \s+ 2 \s+ 3""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""float_fld \s+ float_field \s+ double \s+ double \s+ 4 \s+ 1""",
stdout.getvalue(),
re.X,
)
assert re.search(
r"""name \s+ name \s+ string \s+ string \s+ 4 \s+ 1""", stdout.getvalue(), re.X
)
def test_rows_only_base_returns_a_dataframe_with_rows_only_in_base(spark, comparison1):
    # an explicit schema is required when the frame has only one row and a field value is None
schema = StructType(
[
StructField("acct", LongType(), True),
StructField("date_fld", DateType(), True),
StructField("dollar_amt", LongType(), True),
StructField("float_fld", DoubleType(), True),
StructField("name", StringType(), True),
]
)
expected_df = spark.createDataFrame(
[
Row(
acct=10000001239,
date_fld=datetime.date(2017, 1, 1),
dollar_amt=1,
float_fld=None,
name="<NAME>",
)
],
schema,
)
assert comparison1.rows_only_base.count() == 1
assert (
expected_df.union(
comparison1.rows_only_base.select(
"acct", "date_fld", "dollar_amt", "float_fld", "name"
)
)
.distinct()
.count()
== 1
)
def test_rows_only_compare_returns_a_dataframe_with_rows_only_in_compare(
spark, comparison1
):
expected_df = spark.createDataFrame(
[
Row(
acct=10000001238,
dollar_amt=1.05,
name="<NAME>",
float_fld=111.0,
accnt_purge=True,
)
]
)
assert comparison1.rows_only_compare.count() == 1
assert expected_df.union(comparison1.rows_only_compare).distinct().count() == 1
def test_rows_both_mismatch_returns_a_dataframe_with_rows_where_variables_mismatched(
spark, comparison1
):
expected_df = spark.createDataFrame(
[
Row(
accnt_purge=False,
acct=10000001234,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=123,
dollar_amt_compare=123.4,
dollar_amt_match=False,
float_fld_base=14530.1555,
float_fld_compare=14530.155,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=False,
),
Row(
accnt_purge=False,
acct=10000001235,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=0,
dollar_amt_compare=0.45,
dollar_amt_match=False,
float_fld_base=1.0,
float_fld_compare=None,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=False,
acct=10000001236,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=1345,
dollar_amt_compare=1345.0,
dollar_amt_match=True,
float_fld_base=None,
float_fld_compare=1.0,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
]
)
assert comparison1.rows_both_mismatch.count() == 3
assert expected_df.union(comparison1.rows_both_mismatch).distinct().count() == 3
def test_rows_both_mismatch_only_includes_rows_with_true_mismatches_when_known_diffs_are_present(
spark, comparison_kd1
):
expected_df = spark.createDataFrame(
[
Row(
acct=10000001237,
acct_seq=0,
cd_base="0004",
cd_compare=4.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 4),
open_dt_compare=2017124,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="*2",
stat_cd_compare="V3",
stat_cd_match=False,
stat_cd_match_type="MISMATCH",
)
]
)
assert comparison_kd1.rows_both_mismatch.count() == 1
assert expected_df.union(comparison_kd1.rows_both_mismatch).distinct().count() == 1
def test_rows_both_all_returns_a_dataframe_with_all_rows_in_both_dataframes(
spark, comparison1
):
expected_df = spark.createDataFrame(
[
Row(
accnt_purge=False,
acct=10000001234,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=123,
dollar_amt_compare=123.4,
dollar_amt_match=False,
float_fld_base=14530.1555,
float_fld_compare=14530.155,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=False,
),
Row(
accnt_purge=False,
acct=10000001235,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=0,
dollar_amt_compare=0.45,
dollar_amt_match=False,
float_fld_base=1.0,
float_fld_compare=None,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=False,
acct=10000001236,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=1345,
dollar_amt_compare=1345.0,
dollar_amt_match=True,
float_fld_base=None,
float_fld_compare=1.0,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=False,
acct=10000001237,
date_fld=datetime.date(2017, 1, 1),
dollar_amt_base=123456,
dollar_amt_compare=123456.0,
dollar_amt_match=True,
float_fld_base=345.12,
float_fld_compare=345.12,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
]
)
assert comparison1.rows_both_all.count() == 4
assert expected_df.union(comparison1.rows_both_all).distinct().count() == 4
def test_rows_both_all_shows_known_diffs_flag_and_known_diffs_count_as_matches(
spark, comparison_kd1
):
expected_df = spark.createDataFrame(
[
Row(
acct=10000001234,
acct_seq=0,
cd_base="0001",
cd_compare=1.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 1),
open_dt_compare=2017121,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="*2",
stat_cd_compare=None,
stat_cd_match=True,
stat_cd_match_type="KNOWN_DIFFERENCE",
),
Row(
acct=10000001235,
acct_seq=0,
cd_base="0002",
cd_compare=2.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 2),
open_dt_compare=2017122,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="V1",
stat_cd_compare="V1",
stat_cd_match=True,
stat_cd_match_type="MATCH",
),
Row(
acct=10000001236,
acct_seq=0,
cd_base="0003",
cd_compare=3.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 3),
open_dt_compare=2017123,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="V2",
stat_cd_compare="V2",
stat_cd_match=True,
stat_cd_match_type="MATCH",
),
Row(
acct=10000001237,
acct_seq=0,
cd_base="0004",
cd_compare=4.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 4),
open_dt_compare=2017124,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="*2",
stat_cd_compare="V3",
stat_cd_match=False,
stat_cd_match_type="MISMATCH",
),
Row(
acct=10000001238,
acct_seq=0,
cd_base="0005",
cd_compare=5.0,
cd_match=True,
cd_match_type="KNOWN_DIFFERENCE",
open_dt_base=datetime.date(2017, 5, 5),
open_dt_compare=2017125,
open_dt_match=True,
open_dt_match_type="KNOWN_DIFFERENCE",
stat_cd_base="*2",
stat_cd_compare=None,
stat_cd_match=True,
stat_cd_match_type="KNOWN_DIFFERENCE",
),
]
)
assert comparison_kd1.rows_both_all.count() == 5
assert expected_df.union(comparison_kd1.rows_both_all).distinct().count() == 5
def test_rows_both_all_returns_a_dataframe_with_all_rows_in_identical_dataframes(
spark, comparison2
):
expected_df = spark.createDataFrame(
[
Row(
acct=10000001234,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=123,
dollar_amt_compare=123,
dollar_amt_match=True,
float_fld_base=14530.1555,
float_fld_compare=14530.1555,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
acct=10000001235,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=0,
dollar_amt_compare=0,
dollar_amt_match=True,
float_fld_base=1.0,
float_fld_compare=1.0,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
acct=10000001236,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=1345,
dollar_amt_compare=1345,
dollar_amt_match=True,
float_fld_base=None,
float_fld_compare=None,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
acct=10000001237,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=123456,
dollar_amt_compare=123456,
dollar_amt_match=True,
float_fld_base=345.12,
float_fld_compare=345.12,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
acct=10000001239,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=1,
dollar_amt_compare=1,
dollar_amt_match=True,
float_fld_base=None,
float_fld_compare=None,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
]
)
assert comparison2.rows_both_all.count() == 5
assert expected_df.union(comparison2.rows_both_all).distinct().count() == 5
def test_rows_both_all_returns_all_rows_in_both_dataframes_for_differently_named_columns(
spark, comparison3
):
expected_df = spark.createDataFrame(
[
Row(
accnt_purge=False,
acct=10000001234,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=123,
dollar_amt_compare=123.4,
dollar_amt_match=False,
float_fld_base=14530.1555,
float_fld_compare=14530.155,
float_fld_match=False,
name_base="<NAME>",
name_compare="<NAME>",
name_match=False,
),
Row(
accnt_purge=False,
acct=10000001235,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=0,
dollar_amt_compare=0.45,
dollar_amt_match=False,
float_fld_base=1.0,
float_fld_compare=1.0,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=False,
acct=10000001236,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=1345,
dollar_amt_compare=1345.0,
dollar_amt_match=True,
float_fld_base=None,
float_fld_compare=None,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=False,
acct=10000001237,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=123456,
dollar_amt_compare=123456.0,
dollar_amt_match=True,
float_fld_base=345.12,
float_fld_compare=345.12,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
Row(
accnt_purge=True,
acct=10000001239,
date_fld_base=datetime.date(2017, 1, 1),
date_fld_compare=datetime.date(2017, 1, 1),
date_fld_match=True,
dollar_amt_base=1,
dollar_amt_compare=1.05,
dollar_amt_match=False,
float_fld_base=None,
float_fld_compare=None,
float_fld_match=True,
name_base="<NAME>",
name_compare="<NAME>",
name_match=True,
),
]
)
assert comparison3.rows_both_all.count() == 5
assert expected_df.union(comparison3.rows_both_all).distinct().count() == 5
def test_columns_with_unequal_values_text_is_aligned(comparison4):
stdout = io.StringIO()
comparison4.report(file=stdout)
stdout.seek(0) # Back up to the beginning of the stream
text_alignment_validator(
report=stdout,
section_start="****** Columns with Unequal Values ******",
section_end="\n",
left_indices=(1, 2, 3, 4),
right_indices=(5, 6),
column_regexes=[
r"""(Base\sColumn\sName) \s+ (Compare\sColumn\sName) \s+ (Base\sDtype) \s+ (Compare\sDtype) \s+
(\#\sMatches) \s+ (\#\sMismatches)""",
r"""(-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+)""",
r"""(dollar_amt) \s+ (dollar_amt) \s+ (bigint) \s+ (double) \s+ (2) \s+ (2)""",
r"""(float_fld) \s+ (float_fld) \s+ (double) \s+ (double) \s+ (1) \s+ (3)""",
r"""(super_duper_big_long_name) \s+ (name) \s+ (string) \s+ (string) \s+ (3) \s+ (1)\s*""",
],
)
def test_columns_with_unequal_values_text_is_aligned_with_known_differences(
comparison_kd1,
):
stdout = io.StringIO()
comparison_kd1.report(file=stdout)
stdout.seek(0) # Back up to the beginning of the stream
text_alignment_validator(
report=stdout,
section_start="****** Columns with Unequal Values ******",
section_end="\n",
left_indices=(1, 2, 3, 4),
right_indices=(5, 6, 7),
column_regexes=[
r"""(Base\sColumn\sName) \s+ (Compare\sColumn\sName) \s+ (Base\sDtype) \s+ (Compare\sDtype) \s+
(\#\sMatches) \s+ (\#\sKnown\sDiffs) \s+ (\#\sMismatches)""",
r"""(-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+)""",
r"""(stat_cd) \s+ (STATC) \s+ (string) \s+ (string) \s+ (2) \s+ (2) \s+ (1)""",
r"""(open_dt) \s+ (ACCOUNT_OPEN) \s+ (date) \s+ (bigint) \s+ (0) \s+ (5) \s+ (0)""",
r"""(cd) \s+ (CODE) \s+ (string) \s+ (double) \s+ (0) \s+ (5) \s+ (0)\s*""",
],
)
def test_columns_with_unequal_values_text_is_aligned_with_custom_known_differences(
comparison_kd2,
):
stdout = io.StringIO()
comparison_kd2.report(file=stdout)
stdout.seek(0) # Back up to the beginning of the stream
text_alignment_validator(
report=stdout,
section_start="****** Columns with Unequal Values ******",
section_end="\n",
left_indices=(1, 2, 3, 4),
right_indices=(5, 6, 7),
column_regexes=[
r"""(Base\sColumn\sName) \s+ (Compare\sColumn\sName) \s+ (Base\sDtype) \s+ (Compare\sDtype) \s+
(\#\sMatches) \s+ (\#\sKnown\sDiffs) \s+ (\#\sMismatches)""",
r"""(-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+)""",
r"""(stat_cd) \s+ (STATC) \s+ (string) \s+ (string) \s+ (2) \s+ (2) \s+ (1)""",
r"""(open_dt) \s+ (ACCOUNT_OPEN) \s+ (date) \s+ (bigint) \s+ (0) \s+ (0) \s+ (5)""",
r"""(cd) \s+ (CODE) \s+ (string) \s+ (double) \s+ (0) \s+ (5) \s+ (0)\s*""",
],
)
def test_columns_with_unequal_values_text_is_aligned_for_decimals(comparison_decimal):
stdout = io.StringIO()
comparison_decimal.report(file=stdout)
stdout.seek(0) # Back up to the beginning of the stream
text_alignment_validator(
report=stdout,
section_start="****** Columns with Unequal Values ******",
section_end="\n",
left_indices=(1, 2, 3, 4),
right_indices=(5, 6),
column_regexes=[
r"""(Base\sColumn\sName) \s+ (Compare\sColumn\sName) \s+ (Base\sDtype) \s+ (Compare\sDtype) \s+
(\#\sMatches) \s+ (\#\sMismatches)""",
r"""(-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+) \s+ (-+)""",
r"""(dollar_amt) \s+ (dollar_amt) \s+ (decimal\(8,2\)) \s+ (double) \s+ (1) \s+ (1)""",
],
)
def test_schema_differences_text_is_aligned(comparison4):
stdout = io.StringIO()
comparison4.report(file=stdout)
stdout.seek(0) # Back up to the beginning of the stream
text_alignment_validator(
report=stdout,
section_start="****** Schema Differences ******",
section_end="\n",
left_indices=(1, 2, 3, 4),
right_indices=(),
column_regexes=[
r"""(Base\sColumn\sName) \s+ (Compare\sColumn\sName) \s+ (Base\sDtype) \s+ (Compare\sDtype)""",
r"""(-+) \s+ (-+) \s+ (-+) \s+ (-+)""",
r"""(dollar_amt) \s+ (dollar_amt) \s+ | |
import gc
import os
from itertools import product
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy as sc
import scipy.stats as sts
from tqdm.notebook import tqdm
import triku as tk
from triku.tl._triku_functions import subtract_median
from sklearn.decomposition import PCA
import ray
@ray.remote
def run_individual(adata, window, n_comp, knn, seed, save_dir, dataset_prefix):
# print(window, n_comp, knn, seed)
save_file = "{save_dir}/{pref}-w_{w}-comps_{n_comps}-knn_{knn}-seed_{seed}.csv".format(
save_dir=save_dir,
pref=dataset_prefix,
w=window,
n_comps=n_comp,
knn=knn,
seed=seed,
)
if os.path.exists(save_file):
pass
# print("FILE EXISTS!")
else:
adata_copy = adata.copy()
try:
sc.pp.filter_genes(adata_copy, min_cells=1)
sc.pp.filter_cells(adata_copy, min_genes=1)
sc.pp.log1p(adata_copy)
try:
pca = PCA(n_components=n_comp, whiten=True, svd_solver="auto", random_state=seed,).fit_transform(adata_copy.X.toarray())
            except AttributeError:  # X is already a dense ndarray, so it has no .toarray()
pca = PCA(n_components=n_comp, whiten=True, svd_solver="auto", random_state=seed,).fit_transform(adata_copy.X)
adata_copy.obsm['X_pca'] = pca
sc.pp.neighbors(adata_copy, random_state=seed, metric='cosine', n_neighbors=knn)
tk.tl.triku(adata_copy, n_windows=window, verbose='error')
df_res = pd.DataFrame(
data={
"triku_distance": adata_copy.var["triku_distance"].values,
"triku_distance_uncorrected": adata_copy.var["triku_distance_uncorrected"].values
},
index=adata_copy.var_names.values,
)
        except Exception:
df_res = pd.DataFrame(
data={
"triku_distance": [np.NaN] * len(adata_copy.var_names),
"triku_distance_uncorrected": [np.NaN] * len(adata_copy.var_names)
},
index=adata_copy.var_names.values,
)
df_res.to_csv(save_file)
def run_batch(adata, windows, n_comps, knns, seeds, save_dir, dataset_prefix):
    # We run a grid of parameter selections. At least one of the parameters above
    # should be a list of several values and the rest single-element lists; they can
    # all be longer lists, but the run time grows with the number of combinations.
    # Once triku has run, the distances for each combination are exported as CSV.
    # Each CSV holds the median-corrected distances, with and without the
    # randomization-based correction, for one window / n_comp / knn / seed
    # combination (3 or 5 seeds are used for reproducibility).
ray.init(ignore_reinit_error=True)
ray_get = ray.get([run_individual.remote(adata, window, n_comp, knn, seed, save_dir, dataset_prefix)
for window, n_comp, knn, seed in product(*[windows, n_comps, knns, seeds])])
ray.shutdown()
def run_all_batches(lib_preps, orgs, dataset, read_dir, save_dir):
for lib_prep, org in tqdm(product(*[lib_preps, orgs])):
file_in = None
for file in os.listdir(read_dir):
if (
                org in file
and lib_prep in file
and file.endswith(".h5ad")
):
file_in = file
if file_in is None:
print(f"File for {lib_prep} in {org} not found.")
continue
adata = sc.read_h5ad(read_dir + file_in)
sqr_n_cells = int(adata.shape[0] ** 0.5)
run_batch(
adata,
windows=[100],
n_comps=[3, 5, 10, 20, 30, 40, 50, 100],
knns=[sqr_n_cells + 1],
seeds=[0, 1, 2, 3, 4],
save_dir=save_dir,
dataset_prefix=lib_prep + "_" + dataset + "_" + org,
)
run_batch(
adata,
windows=[100],
n_comps=[30],
knns=[
sqr_n_cells // 20 + 1,
sqr_n_cells // 10 + 1,
sqr_n_cells // 5 + 1,
sqr_n_cells // 2 + 1,
sqr_n_cells + 1,
int(sqr_n_cells * 1.5) + 1,
sqr_n_cells * 2 + 1,
sqr_n_cells * 4 + 1,
],
seeds=[0, 1, 2, 3, 4],
save_dir=save_dir,
dataset_prefix=lib_prep + "_" + dataset + "_" + org,
)
run_batch(
adata,
windows=[10, 20, 30, 50, 100, 200, 500, 1000],
n_comps=[30],
knns=[sqr_n_cells + 1],
seeds=[0, 1, 2, 3, 4],
save_dir=save_dir,
dataset_prefix=lib_prep + "_" + dataset + "_" + org,
)
del adata
gc.collect()
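# Example invocation sketch (library preparations, organisms and directories below
# are hypothetical placeholders; adjust them to the local data layout):
# run_all_batches(lib_preps=["10x", "SMARTseq2"], orgs=["human", "mouse"],
#                 dataset="benchmark", read_dir="data/h5ad/",
#                 save_dir="results/triku_robustness/")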
def return_knn_indices(save_dir, org, lib_prep, dataset):
knn_list = []
for file in os.listdir(save_dir):
if (
org in file
and lib_prep in file
and "w_100-" in file
and "comps_30-" in file
and dataset in file
):
knn_str = file[file.find("knn") + 4 :]
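            # e.g. a file named "10x_bench_human-w_100-comps_30-knn_23-seed_0.csv"
            # (hypothetical) gives knn_str == "23-seed_0.csv", from which 23 is parsed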
knn_list.append(int(knn_str[: knn_str.find("-")]))
knn_list = sorted(list(dict.fromkeys(knn_list)))
return knn_list
def return_pca_indices(save_dir, org, lib_prep, dataset):
    # Recover the fixed kNN value: it is the 5th entry of knn_list (index 4).
knn_pinpoint = return_knn_indices(save_dir, org, lib_prep, dataset)[4]
# Now we get the list of n_comps values
pca_list = []
for file in os.listdir(save_dir):
if (
org in file
and lib_prep in file
and "w_100-" in file
and "knn_%s-" % knn_pinpoint in file
and dataset in file
):
pca_str = file[file.find("comps") + 6 :]
pca_list.append(int(pca_str[: pca_str.find("-")]))
pca_list = sorted(list(dict.fromkeys(pca_list)))
return pca_list, knn_pinpoint
def return_window_indices(save_dir, org, lib_prep, dataset):
    # Recover the fixed kNN value: it is the 5th entry of knn_list (index 4).
knn_pinpoint = return_knn_indices(save_dir, org, lib_prep, dataset)[4]
    # Now we get the list of window values
w_list = []
for file in os.listdir(save_dir):
if (
org in file
and lib_prep in file
and "comps_30-" in file
and "knn_%s-" % knn_pinpoint in file
and dataset in file
):
w_str = file[file.find("w_") + 2 :]
w_list.append(int(w_str[: w_str.find("-")]))
w_list = sorted(list(dict.fromkeys(w_list)))
return w_list, knn_pinpoint
def return_relative_noise(df_1, df_2, select_index_df):
relative_noise_non_rand = list(
(
df_1["triku_distance"].loc[select_index_df].values
- df_2["triku_distance"].loc[select_index_df].values
)
/ (
np.abs(df_1["triku_distance"].loc[select_index_df].values)
+ np.abs(df_2["triku_distance"].loc[select_index_df].values)
)
)
relative_noise_rand = list(
(
df_1["triku_distance_uncorrected"].loc[select_index_df].values
- df_2["triku_distance_uncorrected"].loc[select_index_df].values
)
/ (
np.abs(df_1["triku_distance_uncorrected"].loc[select_index_df].values)
+ np.abs(df_2["triku_distance_uncorrected"].loc[select_index_df].values)
)
)
return relative_noise_rand, relative_noise_non_rand
def return_percentage_overlap(df_1, df_2, min_n_feats, max_n_feats):
feats_1_no_cor = (
df_1.sort_values(by="triku_distance", ascending=False)
.index[min_n_feats:max_n_feats]
.values
)
feats_2_no_cor = (
df_2.sort_values(by="triku_distance", ascending=False)
.index[min_n_feats:max_n_feats]
.values
)
feats_1_rand = (
df_1.sort_values(by="triku_distance_uncorrected", ascending=False)
.index[min_n_feats:max_n_feats]
.values
)
feats_2_rand = (
df_2.sort_values(by="triku_distance_uncorrected", ascending=False)
.index[min_n_feats:max_n_feats]
.values
)
percentage_overlap_non_rand = len(
np.intersect1d(feats_1_no_cor, feats_2_no_cor)
) / (max_n_feats - min_n_feats)
percentage_overlap_rand = len(
np.intersect1d(feats_1_rand, feats_2_rand)
) / (max_n_feats - min_n_feats)
return [percentage_overlap_rand], [percentage_overlap_non_rand]
def return_correlation(df_1, df_2, min_n_feats, max_n_feats):
feats_1_no_cor = (
df_1["triku_distance"]
.sort_values(ascending=False)
.iloc[min_n_feats:max_n_feats]
.values
)
feats_2_no_cor = (
df_2["triku_distance"]
.sort_values(ascending=False)
.iloc[min_n_feats:max_n_feats]
.values
)
feats_1_rand = (
df_1["triku_distance_uncorrected"]
.sort_values(ascending=False)
.iloc[min_n_feats:max_n_feats]
.values
)
feats_2_rand = (
df_2["triku_distance_uncorrected"]
.sort_values(ascending=False)
.iloc[min_n_feats:max_n_feats]
.values
)
for array in [feats_1_no_cor, feats_2_no_cor, feats_1_rand, feats_2_rand]:
np.nan_to_num(array, copy=False)
correlation_non_rand = sts.pearsonr(feats_1_no_cor, feats_2_no_cor)
correlation_rand = sts.pearsonr(feats_1_rand, feats_2_rand)
return [correlation_non_rand[0]], [correlation_rand[0]]
def random_noise_parameter(
lib_prep, org, dataset, save_dir, min_n_feats, max_n_feats, what, by
):
list_dists_non_randomized, list_dists_randomized, list_param_value = (
[],
[],
[],
)
knn_list = return_knn_indices(save_dir, org, lib_prep, dataset)
pca_list = return_pca_indices(save_dir, org, lib_prep, dataset)
if by == "knn":
parameter_list = knn_list
elif by == "pca":
parameter_list, _ = pca_list
for val in parameter_list:
list_dfs = []
for file in os.listdir(save_dir):
if by == "knn":
static_comp = (
"w_100-" in file
and "comps_30-" in file
and dataset in file
)
dyn_comp = "knn_" + str(val) in file
elif by == "pca":
static_comp = (
"w_100-" in file
and "knn_" + str(knn_list[4]) + "-" in file
and dataset in file
)
dyn_comp = "comps_{}-".format(val) in file
if org in file and lib_prep in file and static_comp and dyn_comp:
df = pd.read_csv(save_dir + file)
df = df.set_index("Unnamed: 0")
list_dfs.append(df)
        # Find the genes with the largest distance. Only the last dataframe read is
        # used for this selection; the comparisons below combine all of the dataframes.
select_index_df = (
(df["triku_distance"] + df["triku_distance_uncorrected"])
.sort_values(ascending=False)
.index[min_n_feats:max_n_feats]
)
for i in range(len(list_dfs)):
for j in range(len(list_dfs)):
if i > j:
df_1, df_2 = list_dfs[i], list_dfs[j]
if what == "relative noise":
what_rand, what_non_rand = return_relative_noise(
df_1, df_2, select_index_df
)
elif what == "overlap":
what_rand, what_non_rand = return_percentage_overlap(
df_1, df_2, min_n_feats, max_n_feats
)
else:
what_rand, what_non_rand = None, None
list_dists_non_randomized += what_non_rand
list_dists_randomized += what_rand
list_param_value += [val] * len(what_non_rand)
df_violin = pd.DataFrame(
{
"d": np.abs(list_dists_non_randomized + list_dists_randomized),
by: list_param_value * 2,
"randomized": ["No"] * len(list_dists_non_randomized)
+ ["Yes"] * len(list_dists_randomized),
}
)
return df_violin
def compare_parameter(
lib_prep, org, dataset, save_dir, min_n_feats, max_n_feats, what, by
):
list_dists_non_randomized, list_dists_randomized, list_knn = [], [], []
knn_list = return_knn_indices(save_dir, org, lib_prep, dataset)
pca_list = return_pca_indices(save_dir, org, lib_prep, dataset)
window_list = return_window_indices(save_dir, org, lib_prep, dataset)
    # We first fill one list of dfs with knn = sqrt(N)
list_dfs_knn_1 = []
for file in os.listdir(save_dir):
if (
org in file
and lib_prep in file
and "w_100-" in file
and "comps_30-" in file
and "knn_" + str(knn_list[4]) in file
and dataset in file
):
df = pd.read_csv(save_dir + file)
df = df.set_index("Unnamed: 0")
list_dfs_knn_1.append(df)
if by == "knn":
parameter_list = knn_list
elif by == "pca":
parameter_list, _ = pca_list
elif by == "w":
parameter_list, _ = window_list
for val in parameter_list:
list_dfs_knn_2 = []
for file in os.listdir(save_dir):
if by == "knn":
static_comp = (
"w_100-" in file
and "comps_30-" in file
and dataset in file
)
dyn_comp = "knn_" + str(val) in file
elif by == "pca":
static_comp = (
"w_100-" in file
and "knn_" + str(knn_list[4]) + "-" in file
and dataset in file
)
dyn_comp = "comps_{}-".format(val) in file
elif by == "w":
static_comp = (
"comps_30-" in file
and "knn_" + str(knn_list[4]) + "-" in file
and dataset in file
)
dyn_comp = "w_{}-".format(val) in file
            if org in file and
n n n n n n n n n n n 13 60
38 X monitor_control g 2 1986 semidetached n n h n n n n n n h n h h h n 90 444
39 X monitor_control g 2 1986 semidetached n n h n n n n n n n n n n n n 8 42
40 X monitor_control g 2 1986 semidetached n n h h n n n n n n n n n n n 16 114
41 hst datacapture g 2 1980 semidetached n h h vh h l h h n h l h h n l 177.9 1248
42 slp launchprocessing g 6 1975 semidetached h l h n n l l n n h n n h vl n 302 2400
43 Y application_ground g 5 1982 semidetached n h l n n h n h h n n n h h n 282.1 1368
44 Y application_ground g 5 1982 semidetached h h l n n n h h h n n n h n n 284.7 973
45 Y avionicsmonitoring g 5 1982 semidetached h h n n n l l n h h n h n n n 79 400
46 Y avionicsmonitoring g 5 1977 semidetached l n n n n l l h h vh n h l l h 423 2400
47 Y missionplanning g 5 1977 semidetached n n n n n l n h vh vh l h h n n 190 420
48 Y missionplanning g 5 1984 semidetached n n h n h n n h h n n h h n h 47.5 252
49 Y missionplanning g 5 1980 semidetached vh n xh h h l l n h n n n l h n 21 107
50 Y simulation g 5 1983 semidetached n h h vh n n h h h h n h l l h 78 571.4
51 Y simulation g 5 1984 semidetached n h h vh n n h h h h n h l l h 11.4 98.8
52 Y simulation g 5 1985 semidetached n h h vh n n h h h h n h l l h 19.3 155
53 Y missionplanning g 5 1979 semidetached h n vh h h l h h n n h h l vh h 101 750
54 Y missionplanning g 5 1979 semidetached h n h h h l h n h n n n l vh n 219 2120
55 Y utility g 5 1979 semidetached h n h h h l h n h n n n l vh n 50 370
56 spl datacapture g 2 1979 semidetached vh h h vh vh n n vh vh vh n h h h l 227 1181
57 spl batchdataprocessing g 2 1977 semidetached n h vh n n l n h n vh l n h n l 70 278
58 de avionicsmonitoring g 2 1979 semidetached h l h n n l l n n n n h h n l 0.9 8.4
59 slp operatingsystem g 6 1974 semidetached vh l xh xh vh l l h vh h vl h vl vl h 980 4560
60 slp operatingsystem g 6 1975 embedded n l h n n l l vh n vh h h n l n 350 720
61 Y operatingsystem g 5 1976 embedded h n xh h h l l h n n h h h h n 70 458
62 Y utility g 5 1979 embedded h n xh h h l l h n n h h h h n 271 2460
63 Y avionicsmonitoring g 5 1971 organic n n n n n l l h h h n h n l n 90 162
64 Y avionicsmonitoring g 5 1980 organic n n n n n l l h h h n h n l n 40 150
65 Y avionicsmonitoring g 5 1979 embedded h n h h n l l h h h n h n n n 137 636
66 Y avionicsmonitoring g 5 1977 embedded h n h h n h l h h h n h n vl n 150 882
67 Y avionicsmonitoring g 5 1976 embedded vh n h h n l l h h h n h n n n 339 444
68 Y avionicsmonitoring g 5 1983 organic l h l n n h l h h h n h n l n 240 192
69 Y avionicsmonitoring g 5 1978 semidetached h n h n vh l n h h h h h l l l 144 576
70 Y avionicsmonitoring g 5 1979 semidetached n l n n vh l n h h h h h l l l 151 432
71 Y avionicsmonitoring g 5 1979 semidetached n l h n vh l n h h h h h l l l 34 72
72 Y avionicsmonitoring g 5 1979 semidetached n n h n vh l n h h h h h l l l 98 300
73 Y avionicsmonitoring g 5 1979 semidetached n n h n vh l n h h h h h l l l 85 300
74 Y avionicsmonitoring g 5 1982 semidetached n l n n vh l n h h h h h l l l 20 240
75 Y avionicsmonitoring g 5 1978 semidetached n l n n vh l n h h h h h l l l 111 600
76 Y avionicsmonitoring g 5 1978 semidetached h vh h n vh l n h h h h h l l l 162 756
77 Y avionicsmonitoring g 5 1978 semidetached h h vh n vh l n h h h h h l l l 352 1200
78 Y operatingsystem g 5 1979 semidetached h n vh n vh l n h h h h h l l l 165 97
79 Y missionplanning g 5 1984 embedded h n vh h h l vh h n n h h h vh h 60 409
80 Y missionplanning g 5 1984 embedded h n vh h h l vh h n n h h h vh h 100 703
81 hst Avionics f 2 1980 embedded h vh vh xh xh h h n n n l l n n h 32 1350
82 hst Avionics f 2 1980 embedded h h h vh xh h h h h h h h h n n 53 480
84 spl Avionics f 3 1977 embedded h l vh vh xh l n vh vh vh vl vl h h n 41 599
89 spl Avionics f 3 1977 embedded h l vh vh xh l n vh vh vh vl vl h h n 24 430
91 Y Avionics f 5 1977 embedded vh h vh xh xh n n h h h h h h n h 165 4178.2
92 Y science f 5 1977 embedded vh h vh xh xh n n h h h h h h n h 65 1772.5
93 Y Avionics f 5 1977 embedded vh h vh xh xh n l h h h h h h n h 70 1645.9
94 Y Avionics f 5 1977 embedded vh h xh xh xh n n h h h h h h n h 50 1924.5
97 gal Avionics f 5 1982 embedded vh l vh vh xh l l h l n vl l l h h 7.25 648
98 Y Avionics f 5 1980 embedded vh h vh xh xh n n h h h h h h n h 233 8211
99 X Avionics f 2 1983 embedded h n vh vh vh h h n n n l l n n h 16.3 480
100 X Avionics f 2 1983 embedded h n vh vh vh h h n n n l l n n h 6.2 12
"""))
# ______________________________________________________________________-----
#### Rows
class
import warnings
import numbers
import numpy as np
import geopandas as gpd
from sklearn.cluster import MiniBatchKMeans
from .base_classes import BaseSpatialCV
from .grid_builder import construct_blocks, assign_pt_to_grid
from .utils import geometry_to_2d, convert_geodataframe, load_custom_polygon
__all__ = [
"HBLOCK",
"SKCV",
"RepeatedSKCV",
"UserDefinedSCV"
]
class HBLOCK(BaseSpatialCV):
"""
H-Blocking spatial cross-validator. Partitions study area
into a series of grid polygons that are assigned into
different folds based on several user-defined options. HBLOCK
exposes several parameters for choosing block sizes, types and
fold assignment.
Yields indices to split data into training and test sets.
Parameters
----------
tiles_x : integer, default=5
Number of grid tiles in the West-East direction.
tiles_y : integer, default=5
Number of grid tiles in the North-South direction.
shape : string, default='square'
Specify shape of grid polygons, square or hex.
method : string, default='unique'
Choose grid ID assignment method to build folds. Options
are: unique, where every polygon in the grid is a fold;
systematic, where folds reflect diagonal or anti-diagonal
patterns across the study area; random, where folds are
randomly assigned into groups determined by n_groups parameter;
and optimized_random, where random assignment of grids into
        groups is optimized by reducing dissimilarity between folds.
buffer_radius : integer, default=0
Buffer radius (dead zone) to exclude training points that are
within a defined distance of test data within a fold.
direction : string, default='diagonal'
Choose direction of pattern for systematic grid assignment,
diagonal or anti-diagonal (anti).
n_groups : integer, default=5
Number of folds to randomly assign grid polygons into.
data : array
Array containing covariates used in predictive task. Used to
        calculate dissimilarity of feature space between folds to
find the optimized random grid assignment.
n_sims : integer, default=10
Number of iterations in which to find optimized grid assignment
into folds.
distance_metric : string, default='euclidean'
Distance metric used to reconcile points that sit at exact
border between two grids. Defaults to euclidean assuming
projected coordinate system, otherwise use haversine for
unprojected spaces.
random_state : int, default=None
random_state is the seed used by the random number generator.
Examples
--------
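    A minimal usage sketch rather than a verified doctest. ``XYs`` is assumed to
    be a GeoSeries of shapely Points, and ``X``, ``y`` and ``model`` are a feature
    matrix, labels and any scikit-learn style estimator supplied by the caller:
    ::
        hblock = HBLOCK(tiles_x=10, tiles_y=10, method='random',
                        n_groups=5, buffer_radius=100)
        for train_idx, test_idx in hblock.split(XYs):
            model.fit(X[train_idx], y[train_idx])
            print(model.score(X[test_idx], y[test_idx]))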
"""
def __init__(
self,
tiles_x=5,
tiles_y=5,
shape='square',
method='unique',
buffer_radius=0,
direction='diagonal',
n_groups=5,
data=None,
n_sims=10,
distance_metric='euclidean',
random_state=None
):
self.tiles_x = tiles_x
self.tiles_y = tiles_y
self.shape = shape
self.method = method
self.buffer_radius = buffer_radius
self.direction = direction
self.n_groups = n_groups
self.data = data
self.n_sims = n_sims
self.distance_metric = distance_metric
self.n_splits = tiles_x*tiles_y
self.random_state = random_state
def _iter_test_indices(self, XYs):
"""
Generates integer indices corresponding to test sets and
training indices to be excluded from model training.
Parameters
----------
XYs : GeoSeries
GeoSeries containing shapely Points that identify Easting
and Northing coordinates of data points.
Yields
------
test_indices : array
The testing set indices for that fold.
train_exclude : array
The training set indices to exclude for that fold.
"""
# Define grid type used in CV procedure
grid = construct_blocks(XYs,
tiles_x = self.tiles_x,
tiles_y = self.tiles_y,
shape = self.shape,
method = self.method,
direction = self.direction,
n_groups = self.n_groups,
data = self.data,
n_sims = self.n_sims,
random_state = self.random_state)
# Convert to GDF to use Geopandas functions
XYs = gpd.GeoDataFrame(({'geometry':XYs}))
# Assign pts to grids
XYs = assign_pt_to_grid(XYs, grid, self.distance_metric)
grid_ids = np.unique(grid.grid_id)
# Yield test indices and optionally training indices within buffer
for grid_id in grid_ids:
test_indices = XYs.loc[XYs['grid_id'] == grid_id ].index.values
# Remove empty grids
if len(test_indices) < 1:
continue
grid_poly_buffer = grid.loc[[grid_id]].buffer(self.buffer_radius)
test_indices, train_exclude = \
super()._remove_buffered_indices(XYs, test_indices,
self.buffer_radius, grid_poly_buffer)
yield test_indices, train_exclude
class SKCV(BaseSpatialCV):
"""
    Spatial K-fold cross-validator. A modification of standard K-fold CV that
    counters the optimistically biased performance estimates caused by spatial
    autocorrelation in the data. The bias is reduced by grouping spatially
    proximate points into the same test fold and by excluding training points
    that lie within a buffer distance of the test data.
When K=N, SKCV becomes a spatial leave-one-out (SLOO) cross-validator.
Yields indices to split data into training and test sets.
Parameters
----------
n_splits : int, default=10
Number of folds. Must be at least 2.
buffer_radius : integer, default=0
Buffer radius (dead zone) to exclude training points that are
within a defined distance of test data within a fold.
random_state : int, RandomState instance or None, optional, default=None
random_state is the seed used by the random number generator.
Examples
--------
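    An illustrative sketch, not a verified doctest; ``XYs`` is assumed to be a
    GeoSeries of shapely Points:
    ::
        skcv = SKCV(n_splits=10, buffer_radius=250, random_state=123)
        for train_idx, test_idx in skcv.split(XYs):
            pass  # fit and evaluate a model on the spatially separated folds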
"""
def __init__(
self,
n_splits=10,
buffer_radius = 0,
random_state = None
):
self.n_splits = n_splits
self.buffer_radius = buffer_radius
self.random_state = random_state
def _iter_test_indices(self, XYs):
"""
Generates integer indices corresponding to test sets.
Parameters
----------
        XYs : GeoSeries
GeoSeries containing shapely Points that identify Easting
and Northing coordinates of data points.
Yields
------
test_indices : array
The testing set indices for that fold.
train_exclude : array
The training set indices to exclude for that fold.
"""
if self.n_splits > len(XYs) :
raise ValueError(
"Number of specified n_splits (folds) is larger than number of data points. Given {} observations and {} folds.".format(
len(XYs), self.n_splits
)
)
sloo = len(XYs) == self.n_splits
lattice = any(XYs.geom_type == 'Polygon') or any(XYs.geom_type == 'MultiPolygon')
# If K = N, SLOO
if sloo:
num_samples = XYs.shape[0]
indices_from_folds = np.arange(num_samples)
else:
# Partion XYs space into folds
XYs_to_2d = geometry_to_2d(XYs)
km_skcv = MiniBatchKMeans(n_clusters = self.n_splits, random_state=self.random_state)
labels = km_skcv.fit(XYs_to_2d).labels_
uniques, counts = np.unique(labels, return_counts=True)
check_fold_n = (counts < 2)
if check_fold_n.any():
                warn = "{} folds contain fewer than 2 points and may not form valid polygons.".format(check_fold_n.sum())
warnings.warn(warn)
indices_from_folds = [np.argwhere(labels == i).reshape(-1)
for i in uniques]
for fold_indices in indices_from_folds:
if sloo:
test_indices = np.array([fold_indices])
fold_polygon = XYs.loc[test_indices].buffer(self.buffer_radius)
elif lattice:
test_indices = np.array(fold_indices)
fold_polygon = XYs.loc[test_indices].unary_union.buffer(self.buffer_radius)
else: # skcv
test_indices = np.array(fold_indices)
fold_polygon = XYs.loc[test_indices].unary_union.convex_hull.buffer(self.buffer_radius)
test_indices, train_exclude = \
super()._remove_buffered_indices(XYs, test_indices,
self.buffer_radius, fold_polygon)
yield test_indices, train_exclude
class RepeatedSKCV(SKCV):
"""
    Repeats Spatial K-Fold cross-validation (SKCV) n_repeats times, with a
    different K-means randomization in each repetition. Because K-means is
    sensitive to its randomly initialised centroid seeds, repeating SKCV in this
    way yields a generator of n_repeats * K test and training splits.
Parameters
----------
n_splits : int, default=10
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator.
If None, the random number generator is the RandomState instance used
by `np.random`.
**cvargs : additional params
Constructor parameters for cv.
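    Examples
    --------
    A brief sketch (``XYs`` is an assumed GeoSeries of points; keyword arguments
    other than n_splits are forwarded to the underlying SKCV):
    ::
        rskcv = RepeatedSKCV(n_repeats=5, n_splits=10, buffer_radius=50)
        splits = list(rskcv.split(XYs))  # typically 5 * 10 = 50 (train, test) pairs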
"""
def __init__(
self,
n_repeats=10,
n_splits=10,
random_state=None,
**cvargs
):
if not isinstance(n_repeats, numbers.Integral):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
self.cv = SKCV
self.n_repeats = n_repeats
self.n_splits = n_splits
self.cvargs = cvargs
def split(self, XYs):
n_repeats = self.n_repeats
for idx in range(n_repeats):
cv = self.cv(self.n_splits, **self.cvargs)
for train_index, test_index in cv.split(XYs):
yield train_index, test_index
class UserDefinedSCV(BaseSpatialCV):
"""
Spatial cross-validation using user-defined polygons.
Yields indices to split data into training and test sets.
Parameters
----------
custom_polygons : string, GeoSeries
File path to user defined grid polygons used to assign data
points into folds.
buffer_radius : integer, default=0
Buffer radius (dead zone) to exclude training points that are
within a defined distance of test data within a fold.
distance_metric : string, default='euclidean'
Distance metric used to reconcile points that sit at exact
border between two grids. Defaults to euclidean assuming
projected coordinate system, otherwise use haversine for
unprojected spaces.
Yields
------
test_indices : array
The testing set indices for that fold.
train_exclude : array
The training set indices to exclude for that fold.
"""
def __init__(
self,
custom_polygons,
buffer_radius = 0,
distance_metric = 'euclidean'
):
        self.buffer_radius = buffer_radius
#!/usr/bin/env python3
import argparse
from datetime import datetime, timedelta
from importlib import import_module
import io
import json
import os
import re
import shutil
import sys
import tempfile
import threading
import time
import warnings
try:
from clint.textui import progress
except ImportError:
print("Warning: clint is not installed, no fancy progressbars in the terminal for you.")
progress = None
import numpy
import yaml
if sys.version_info[0] < 3:
# OK, ancients, I will support Python 2, but you owe me a beer
input = raw_input # noqa: F821
PB_MESSAGES = {
"Burndown": "internal.pb.pb_pb2.BurndownAnalysisResults",
"Couples": "internal.pb.pb_pb2.CouplesAnalysisResults",
"Shotness": "internal.pb.pb_pb2.ShotnessAnalysisResults",
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", default="",
help="Path to the output file/directory (empty for display). "
"If the extension is JSON, the data is saved instead of "
"the real image.")
parser.add_argument("-i", "--input", default="-",
help="Path to the input file (- for stdin).")
parser.add_argument("-f", "--input-format", default="auto", choices=["yaml", "pb", "auto"])
parser.add_argument("--text-size", default=12, type=int,
help="Size of the labels and legend.")
parser.add_argument("--backend", help="Matplotlib backend to use.")
parser.add_argument("--style", choices=["black", "white"], default="black",
help="Plot's general color scheme.")
parser.add_argument("--size", help="Axes' size in inches, for example \"12,9\"")
parser.add_argument("--relative", action="store_true",
help="Occupy 100%% height for every measurement.")
parser.add_argument("--couples-tmp-dir", help="Temporary directory to work with couples.")
parser.add_argument("-m", "--mode",
choices=["project", "file", "person", "churn_matrix", "ownership",
"couples", "shotness", "sentiment", "all", "run_times"],
help="What to plot.")
parser.add_argument(
"--resample", default="year",
help="The way to resample the time series. Possible values are: "
"\"month\", \"year\", \"no\", \"raw\" and pandas offset aliases ("
"http://pandas.pydata.org/pandas-docs/stable/timeseries.html"
"#offset-aliases).")
parser.add_argument("--disable-projector", action="store_true",
help="Do not run Tensorflow Projector on couples.")
parser.add_argument("--max-people", default=20, type=int,
help="Maximum number of developers in churn matrix and people plots.")
args = parser.parse_args()
return args
class Reader(object):
def read(self, file):
raise NotImplementedError
def get_name(self):
raise NotImplementedError
def get_header(self):
raise NotImplementedError
def get_burndown_parameters(self):
raise NotImplementedError
def get_project_burndown(self):
raise NotImplementedError
def get_files_burndown(self):
raise NotImplementedError
def get_people_burndown(self):
raise NotImplementedError
def get_ownership_burndown(self):
raise NotImplementedError
def get_people_interaction(self):
raise NotImplementedError
def get_files_coocc(self):
raise NotImplementedError
def get_people_coocc(self):
raise NotImplementedError
def get_shotness_coocc(self):
raise NotImplementedError
def get_shotness(self):
raise NotImplementedError
class YamlReader(Reader):
def read(self, file):
yaml.reader.Reader.NON_PRINTABLE = re.compile(r"(?!x)x")
try:
loader = yaml.CLoader
except AttributeError:
print("Warning: failed to import yaml.CLoader, falling back to slow yaml.Loader")
loader = yaml.Loader
try:
if file != "-":
with open(file) as fin:
data = yaml.load(fin, Loader=loader)
else:
data = yaml.load(sys.stdin, Loader=loader)
except (UnicodeEncodeError, yaml.reader.ReaderError) as e:
print("\nInvalid unicode in the input: %s\nPlease filter it through "
"fix_yaml_unicode.py" % e)
sys.exit(1)
self.data = data
def get_run_times(self):
return {}
def get_name(self):
return self.data["hercules"]["repository"]
def get_header(self):
header = self.data["hercules"]
return header["begin_unix_time"], header["end_unix_time"]
def get_burndown_parameters(self):
header = self.data["Burndown"]
return header["sampling"], header["granularity"]
def get_project_burndown(self):
return self.data["hercules"]["repository"], \
self._parse_burndown_matrix(self.data["Burndown"]["project"]).T
def get_files_burndown(self):
return [(p[0], self._parse_burndown_matrix(p[1]).T)
for p in self.data["Burndown"]["files"].items()]
def get_people_burndown(self):
return [(p[0], self._parse_burndown_matrix(p[1]).T)
for p in self.data["Burndown"]["people"].items()]
def get_ownership_burndown(self):
return self.data["Burndown"]["people_sequence"].copy(),\
{p[0]: self._parse_burndown_matrix(p[1])
for p in self.data["Burndown"]["people"].items()}
def get_people_interaction(self):
return self.data["Burndown"]["people_sequence"].copy(), \
self._parse_burndown_matrix(self.data["Burndown"]["people_interaction"])
def get_files_coocc(self):
coocc = self.data["Couples"]["files_coocc"]
return coocc["index"], self._parse_coocc_matrix(coocc["matrix"])
def get_people_coocc(self):
coocc = self.data["Couples"]["people_coocc"]
return coocc["index"], self._parse_coocc_matrix(coocc["matrix"])
def get_shotness_coocc(self):
shotness = self.data["Shotness"]
index = ["%s:%s" % (i["file"], i["name"]) for i in shotness]
indptr = numpy.zeros(len(shotness) + 1, dtype=numpy.int64)
indices = []
data = []
for i, record in enumerate(shotness):
pairs = [(int(k), v) for k, v in record["counters"].items()]
pairs.sort()
indptr[i + 1] = indptr[i] + len(pairs)
for k, v in pairs:
indices.append(k)
data.append(v)
indices = numpy.array(indices, dtype=numpy.int32)
data = numpy.array(data, dtype=numpy.int32)
from scipy.sparse import csr_matrix
return index, csr_matrix((data, indices, indptr), shape=(len(shotness),) * 2)
def get_shotness(self):
from munch import munchify
obj = munchify(self.data["Shotness"])
# turn strings into ints
for item in obj:
item.counters = {int(k): v for k, v in item.counters.items()}
if len(obj) == 0:
raise KeyError
return obj
def get_sentiment(self):
from munch import munchify
return munchify({int(key): {
"Comments": vals[2].split("|"),
"Commits": vals[1],
"Value": float(vals[0])
} for key, vals in self.data["Sentiment"].items()})
def _parse_burndown_matrix(self, matrix):
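        # Each matrix comes from YAML as newline-separated rows of space-separated
        # integers, e.g. "1 2 3\n4 5 6" -> array([[1, 2, 3], [4, 5, 6]])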
return numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
for line in matrix.split("\n")])
def _parse_coocc_matrix(self, matrix):
from scipy.sparse import csr_matrix
data = []
indices = []
indptr = [0]
for row in matrix:
for k, v in sorted(row.items()):
data.append(v)
indices.append(k)
indptr.append(indptr[-1] + len(row))
return csr_matrix((data, indices, indptr), shape=(len(matrix),) * 2)
class ProtobufReader(Reader):
def read(self, file):
try:
from internal.pb.pb_pb2 import AnalysisResults
except ImportError as e:
print("\n\n>>> You need to generate internal/pb/pb_pb2.py - run \"make\"\n",
file=sys.stderr)
raise e from None
self.data = AnalysisResults()
if file != "-":
with open(file, "rb") as fin:
self.data.ParseFromString(fin.read())
else:
self.data.ParseFromString(sys.stdin.buffer.read())
self.contents = {}
for key, val in self.data.contents.items():
try:
mod, name = PB_MESSAGES[key].rsplit(".", 1)
except KeyError:
sys.stderr.write("Warning: there is no registered PB decoder for %s\n" % key)
continue
cls = getattr(import_module(mod), name)
self.contents[key] = msg = cls()
msg.ParseFromString(val)
def get_run_times(self):
return {key: val for key, val in self.data.header.run_time_per_item.items()}
def get_name(self):
return self.data.header.repository
def get_header(self):
header = self.data.header
return header.begin_unix_time, header.end_unix_time
def get_burndown_parameters(self):
burndown = self.contents["Burndown"]
return burndown.sampling, burndown.granularity
def get_project_burndown(self):
return self._parse_burndown_matrix(self.contents["Burndown"].project)
def get_files_burndown(self):
return [self._parse_burndown_matrix(i) for i in self.contents["Burndown"].files]
def get_people_burndown(self):
return [self._parse_burndown_matrix(i) for i in self.contents["Burndown"].people]
def get_ownership_burndown(self):
people = self.get_people_burndown()
return [p[0] for p in people], {p[0]: p[1].T for p in people}
def get_people_interaction(self):
burndown = self.contents["Burndown"]
return [i.name for i in burndown.people], \
self._parse_sparse_matrix(burndown.people_interaction).toarray()
def get_files_coocc(self):
node = self.contents["Couples"].file_couples
return list(node.index), self._parse_sparse_matrix(node.matrix)
def get_people_coocc(self):
node = self.contents["Couples"].people_couples
return list(node.index), self._parse_sparse_matrix(node.matrix)
def get_shotness_coocc(self):
shotness = self.get_shotness()
index = ["%s:%s" % (i.file, i.name) for i in shotness]
indptr = numpy.zeros(len(shotness) + 1, dtype=numpy.int32)
indices = []
data = []
for i, record in enumerate(shotness):
pairs = list(record.counters.items())
pairs.sort()
indptr[i + 1] = indptr[i] + len(pairs)
for k, v in pairs:
indices.append(k)
data.append(v)
indices = numpy.array(indices, dtype=numpy.int32)
data = numpy.array(data, dtype=numpy.int32)
from scipy.sparse import csr_matrix
return index, csr_matrix((data, indices, indptr), shape=(len(shotness),) * 2)
def get_shotness(self):
records = self.contents["Shotness"].records
if len(records) == 0:
raise KeyError
return records
def get_sentiment(self):
byday = self.contents["Sentiment"].SentimentByDay
if len(byday) == 0:
raise KeyError
return byday
def _parse_burndown_matrix(self, matrix):
dense = numpy.zeros((matrix.number_of_rows, matrix.number_of_columns), dtype=int)
for y, row in enumerate(matrix.rows):
for x, col in enumerate(row.columns):
dense[y, x] = col
return matrix.name, dense.T
def _parse_sparse_matrix(self, matrix):
from scipy.sparse import csr_matrix
return csr_matrix((list(matrix.data), list(matrix.indices), list(matrix.indptr)),
shape=(matrix.number_of_rows, matrix.number_of_columns))
READERS = {"yaml": YamlReader, "yml": YamlReader, "pb": ProtobufReader}
def read_input(args):
sys.stdout.write("Reading the input... ")
sys.stdout.flush()
if args.input != "-":
if args.input_format == "auto":
args.input_format = args.input.rsplit(".", 1)[1]
elif args.input_format == "auto":
args.input_format = "yaml"
reader = READERS[args.input_format]()
reader.read(args.input)
print("done")
return reader
def calculate_average_lifetime(matrix):
lifetimes = numpy.zeros(matrix.shape[1] - 1)
for band in matrix:
start = 0
for i, line in enumerate(band):
if i == 0 or band[i - 1] == 0:
start += 1
continue
lifetimes[i - start] = band[i - 1] - line
lifetimes[i - start] = band[i - 1]
lsum = lifetimes.sum()
if lsum != 0:
return (lifetimes.dot(numpy.arange(1, matrix.shape[1], 1))
/ (lsum * matrix.shape[1]))
return numpy.nan
def interpolate_burndown_matrix(matrix, granularity, sampling):
daily = numpy.zeros(
(matrix.shape[0] * granularity, matrix.shape[1] * sampling),
dtype=numpy.float32)
"""
----------> samples, x
|
|
|
⌄
bands, y
"""
for y in range(matrix.shape[0]):
for x in range(matrix.shape[1]):
if y * granularity > (x + 1) * sampling:
# the future is zeros
continue
def decay(start_index: int, start_val: float):
if start_val == 0:
return
k = matrix[y][x] / start_val # <= 1
scale = (x + 1) * sampling - start_index
for i in range(y * granularity, (y + 1) * granularity):
initial = daily[i][start_index - 1]
for j in range(start_index, (x + 1) * sampling):
daily[i][j] = initial * (
1 + (k - 1) * (j - start_index + 1) / scale)
def grow(finish_index: int, finish_val: float):
initial = matrix[y][x - 1] if x > 0 else 0
start_index = x * sampling
if start_index < y * granularity:
start_index = y * granularity
if finish_index == start_index:
return
avg = (finish_val - initial) / (finish_index - start_index)
for j in range(x * sampling, finish_index):
for i in range(start_index, j + 1):
daily[i][j] = avg
# copy [x*g..y*s)
for j in range(x * sampling, finish_index):
for i in range(y * granularity, x * sampling):
daily[i][j] = daily[i][j - 1]
if (y + 1) * granularity >= (x + 1) * sampling:
# x*granularity <= (y+1)*sampling
# 1. x*granularity <= y*sampling
# y*sampling..(y+1)sampling
#
# x+1
# /
# /
# / y+1 -|
# / |
# / y -|
# /
# / x
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from enum import Enum
import re
import os
from autohandshake.src.exceptions import (
InvalidURLError, NoSuchElementError, WrongPageForMethodError,
InsufficientPermissionsError, InvalidUserTypeError
)
from autohandshake.src.constants import MAX_WAIT_TIME
from autohandshake.src.constants import BASE_URL
class UserType(Enum):
"""
The possible user types in Handshake
* Employer - a Handshake employer account
* Staff - a career services staff/admin account
* Student - a student or alumni account
"""
EMPLOYER = 'Employers'
STAFF = 'Career Services'
STUDENT = 'Students'
class HandshakeBrowser:
"""
An automated browser for navigating Handshake.
Since a logged-in instance of this class is returned by HandshakeSession's
__enter__ method, it does not usually need to be manually instantiated. Additionally,
for most use cases, the user only needs to pass a HandshakeBrowser object to
a Page object, then let the Page's methods do the heavy-lifting.
For example, you almost never need to write:
::
browser = HandshakeBrowser()
The vast majority of use cases look something like:
::
with HandshakeSession(school_url, email) as browser:
some_page = SomePage(browser)
some_page.do_something()
If you need to specify a custom max_wait_time, that can be done through the
HandshakeSession object:
::
# this
with HandshakeSession(school_url, email, max_wait_time = 60) as browser:
some_page = SomePage(browser)
# not this
browser = HandshakeBrowser(max_wait_time = 60)
"""
def __init__(self, max_wait_time: int = MAX_WAIT_TIME, chromedriver_path: str = None, download_dir: str = None):
"""
:param max_wait_time: the maximum time (in seconds) to wait for an element
to load before throwing a timeout error
:type max_wait_time: int
:param chromedriver_path: the filepath to chromedriver.exe. If not specified, the package's own driver will be used
:type chromedriver_path: str
:param download_dir: the directory in which to download any files. If not
specified, defaults to system's default download location.
:type download_dir: str
"""
options = webdriver.ChromeOptions()
options.add_argument('--window-size=1920,1080')
if download_dir:
options.add_experimental_option('prefs', {'download.default_directory': download_dir})
dirname = os.path.dirname(__file__)
if not chromedriver_path:
chromedriver_path = os.path.join(dirname, '../chromedriver.exe')
self._browser = webdriver.Chrome(executable_path=chromedriver_path,
options=options)
self.max_wait_time = max_wait_time
self._user_type = None
def get(self, url: str):
"""Go to the web page specified by the given Handshake url.
:param url: the url to visit. Must be of the form
"https://[...].joinhandshake.com[/...]"
:type url: str
"""
self._validate_url_str(url)
self._browser.get(url)
self._validate_page_exists()
self._validate_permissions()
def quit(self):
"""Close the browser"""
self._browser.quit()
def element_exists_by_xpath(self, xpath: str) -> bool:
"""
Determine whether or not an element with the given xpath exists in the page.
:param xpath: the xpath of the element to search for
:type xpath: str
:return: True if the element exists, false otherwise
:rtype: bool
"""
try:
self._browser.find_element_by_xpath(xpath)
return True
except NoSuchElementException:
return False
def wait_until_element_exists_by_xpath(self, xpath: str):
"""
Wait until an element with the given xpath exists on the page.
:param xpath: the xpath of the element to wait for
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.visibility_of_element_located((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not appear in time")
def wait_until_element_does_not_exist_by_xpath(self, xpath: str):
"""
        Wait until the element with the given xpath no longer exists on the page.
        :param xpath: the xpath of the element expected to disappear
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.invisibility_of_element_located((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not disappear in tme")
def wait_until_element_is_clickable_by_xpath(self, xpath: str):
"""
Wait until an element with the given xpath is clickable.
:param xpath: the xpath of the element to wait for
:type xpath: str
"""
try:
WebDriverWait(self._browser, self.max_wait_time).until(
EC.element_to_be_clickable((By.XPATH, xpath)))
except TimeoutException:
raise TimeoutError(f"Element with xpath {xpath} did not become clickable")
def send_text_to_element_by_xpath(self, xpath: str, text: str, clear: bool = True):
"""
Send a string to an input field identified by the given xpath
:param text: the text to send
:type text: str
:param xpath: the xpath of the input field to which to send the text
:type xpath: str
:param clear: whether or not to clear the field before sending text. If
False, text will be appended to any text already present.
:type clear: bool
"""
try:
element = self._browser.find_element_by_xpath(xpath)
if clear:
element.clear()
element.send_keys(text)
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
def click_element_by_xpath(self, xpath):
"""
Click an element on the page given its xpath
:param xpath: the xpath of the element to click
:type xpath: str
"""
try:
self._browser.find_element_by_xpath(xpath).click()
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
def wait_then_click_element_by_xpath(self, xpath):
"""
Click an element on the page given its xpath after waiting to make sure
it exists
:param xpath: the xpath of the element to click
:type xpath: str
"""
self.wait_until_element_exists_by_xpath(xpath)
self.click_element_by_xpath(xpath)
def get_element_attribute_by_xpath(self, xpath: str, attribute: str) -> str:
"""
Get the value of the given attribute from the element with the given xpath
:param xpath: the xpath of the element of interest
:type xpath: str
:param attribute: the name of the attribute of interest, e.g. 'value'
:type attribute: str
:return: the value of the attribute on the element of interest
:rtype: str
"""
try:
if attribute.lower() == 'text':
return self._browser.find_element_by_xpath(xpath).text
return self._browser.find_element_by_xpath(xpath).get_attribute(attribute)
except NoSuchElementException:
raise NoSuchElementError(f'No element found for xpath: "{xpath}"')
def get_elements_attribute_by_xpath(self, xpath: str, attribute: str) -> list:
"""
Get the value of a given attribute for all elements with the given xpath
:param xpath: the xpath of the elements of interest
:type xpath: str
:param attribute: the name of the attribute of interest, e.g. 'value'
:type attribute: str
:return: a list of values of the given attribute for each matching element
:rtype: list
"""
try:
elements = self._browser.find_elements_by_xpath(xpath)
if attribute.lower() == 'text':
return [element.text for element in elements]
return [element.get_attribute(attribute) for element in elements]
except NoSuchElementException:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def execute_script_on_element_by_xpath(self, script: str, xpath: str = None):
"""
Execute the given javascript expression. If xpath of element is provided,
the element becomes available to use in the script, and can be accessed
using arguments[0].
:param script: the javascript to be executed
:type script: str
:param xpath: the xpath of the optional element to be passed to the script
:type xpath: str
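        A hypothetical example (the xpath is illustrative) that scrolls a table
        into view::
            browser.execute_script_on_element_by_xpath(
                'arguments[0].scrollIntoView();', '//table')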
"""
if not xpath:
self._browser.execute_script(script)
else:
try:
self._browser.execute_script(script, self._browser.find_element_by_xpath(xpath))
except NoSuchElementException:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def element_is_selected_by_xpath(self, xpath: str) -> bool:
"""Get whether or not the element specified by the given xpath is selected
:param xpath: the xpath of the elements of interest
:type xpath: str
:return: True if the element is selected, False otherwise
:rtype: bool
"""
try:
return self._browser.find_element_by_xpath(xpath).is_selected()
except:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
    def switch_to_frame_by_xpath(self, xpath):
        """
        Switch the browser's focus to the iframe specified by the given xpath.
        :param xpath: the xpath of the frame to switch to
        :type xpath: str
        """
        try:
frame = self._browser.find_element_by_xpath(xpath)
self._browser.switch_to.frame(frame)
except NoSuchElementException:
raise NoSuchElementError(f'No elements found for xpath: "{xpath}"')
def update_constants(self):
"""
Update any Handshake environment constants such as School ID or User ID.
This should be done every time the browser switches user types and upon
initial login.
"""
if self._user_type == UserType.EMPLOYER:
self.employer_id = self._get_meta_constant('logged_in_user_institution_id')
else:
self.school_id = self._get_meta_constant('logged_in_user_institution_id')
self.user_id = self._get_meta_constant('logged_in_user_id')
self._user_type = UserType(self._get_meta_constant('current_user_type'))
def _get_meta_constant(self, name: str) -> str:
"""
Get the content of a meta tag with the given name.
The method is used to pull data like the current school id, user id,
or employer id from Handshake's <meta> tags. All tags are of the form:
<meta content="foo" name="bar">. Given "bar," this method returns "foo".
:param name: the name of the meta tag to query
:type name: str
:return: the content of the meta tag
:rtype: str
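        Example (a sketch, using a tag name this class already reads in
        update_constants)::
            user_id = self._get_meta_constant('logged_in_user_id')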
"""
return self.get_element_attribute_by_xpath(f'//meta[@name="{name}"]',
'content')
def switch_to_new_tab(self):
"""Wait for the new tab to finish loaded, then switch to it."""
WebDriverWait(self._browser, self.max_wait_time).until(
EC.number_of_windows_to_be(2))
self._browser.switch_to.window(self._browser.window_handles[-1])
def return_to_main_tab(self):
"""With a second tab open, close the current tab and return to the main tab."""
self._browser.execute_script('window.close();')
self._browser.switch_to.window(self._browser.window_handles[0])
def maximize_window(self):
"""Maximize the browser window."""
self._browser.maximize_window()
@property
def user_type(self) -> UserType:
"""
Get the user type of the account currently logged into Handshake.
:return: the browser's currently-logged-in user type
:rtype: UserType
"""
return self._user_type
def switch_users(self, user_type: UserType):
"""
Switch to the system view specified by the given user type.
This method automates the built-in "Switch Users" function in Handshake.
:param user_type: the user type to which to switch
:type user_type: UserType
"""
if | |
+ config['content_type']
base_field_override_response = issue_request(config, 'GET', base_field_override_url, headers)
if base_field_override_response.status_code == 200:
field_config = json.loads(base_field_override_response.text)
for item in field_config['data']:
field_name = item['attributes']['field_name']
required = item['attributes']['required']
field_type = item['attributes']['field_type']
entity_type = item['attributes']['entity_type']
field_definitions[field_name] = {
'cardinality': 1,
'field_type': field_type,
'required': required,
'entity_type': entity_type
}
# Hacky implementation of parsing Drupal's JSON:API pager.
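        # A sketch of the response shape this loop relies on (the exact 'next'
        # href is whatever Drupal's JSON:API returns):
        #   {"data": [...field config resources...],
        #    "links": {"next": {"href": "...?page[offset]=50&page[limit]=50"}}}
        # As long as 'links' contains a 'next' entry, another page is fetched.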
offset = 0
        while 'next' in field_config['links']:
            # Advance the pager before fetching so the page retrieved above is not
            # requested twice; the step matches the 'page[limit]' of 50 used below.
            offset += 50
            base_field_override_response = issue_request(config, 'GET', base_field_override_url, headers, '', '', {'page[offset]': offset, 'page[limit]': '50'})
field_config = json.loads(base_field_override_response.text)
for item in field_config['data']:
field_name = item['attributes']['field_name']
required = item['attributes']['required']
field_type = item['attributes']['field_type']
entity_type = item['attributes']['entity_type']
field_definitions[field_name] = {
'cardinality': 1,
'field_type': field_type,
'required': required,
'entity_type': entity_type
}
if 'next' not in field_config['links']:
break
return field_definitions
def check_input(config, args):
"""Validate the config file and input data.
"""
logging.info('Starting configuration check for "%s" task using config file %s.', config['task'], args.config)
ping_islandora(config)
# Check the config file.
tasks = ['create', 'update', 'delete', 'add_media', 'delete_media', 'create_from_files']
joiner = ', '
if config['task'] not in tasks:
message = '"task" in your configuration file must be one of "create", "update", "delete", "add_media", or "create_from_files".'
logging.error(message)
sys.exit('Error: ' + message)
config_keys = list(config.keys())
config_keys.remove('check')
# Dealing with optional config keys. If you introduce a new optional key, add it to this list. Note that optional
# keys are not validated.
optional_config_keys = ['delimiter', 'subdelimiter', 'log_file_path', 'log_file_mode',
'allow_missing_files', 'preprocessors', 'bootstrap', 'published',
'validate_title_length', 'media_type', 'media_types', 'pause',
'output_csv', 'delete_media_with_nodes', 'paged_content_from_directories',
'paged_content_sequence_seprator', 'paged_content_page_model_tid',
'paged_content_page_display_hints', 'paged_content_page_content_type',
'allow_adding_terms', 'log_json', 'user_agent', 'allow_redirects']
for optional_config_key in optional_config_keys:
if optional_config_key in config_keys:
config_keys.remove(optional_config_key)
# Check for presence of required config keys.
if config['task'] == 'create':
create_options = ['task', 'host', 'username', 'password', 'content_type',
'input_dir', 'input_csv', 'media_use_tid',
'drupal_filesystem', 'id_field']
if not set(config_keys) == set(create_options):
message = 'Please check your config file for required values: ' + joiner.join(create_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'update':
update_options = ['task', 'host', 'username', 'password',
'content_type', 'input_dir', 'input_csv']
if not set(config_keys) == set(update_options):
message = 'Please check your config file for required values: ' + joiner.join(update_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete':
delete_options = ['task', 'host', 'username', 'password',
'input_dir', 'input_csv']
if not set(config_keys) == set(delete_options):
message = 'Please check your config file for required values: ' + joiner.join(delete_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'add_media':
add_media_options = ['task', 'host', 'username', 'password',
'input_dir', 'input_csv', 'media_use_tid',
'drupal_filesystem']
if not set(config_keys) == set(add_media_options):
message = 'Please check your config file for required values: ' + joiner.join(add_media_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete_media':
delete_media_options = ['task', 'host', 'username', 'password',
'input_dir', 'input_csv']
if not set(config_keys) == set(delete_media_options):
message = 'Please check your config file for required values: ' + joiner.join(delete_media_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, configuration file has all required values (did not check for optional values).'
print(message)
logging.info(message)
# Check existence of CSV file.
input_csv = os.path.join(config['input_dir'], config['input_csv'])
if os.path.exists(input_csv):
message = 'OK, CSV file ' + input_csv + ' found.'
print(message)
logging.info(message)
else:
message = 'CSV file ' + input_csv + ' not found.'
logging.error(message)
sys.exit('Error: ' + message)
# Check column headers in CSV file.
csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
csv_column_headers = csv_data.fieldnames
# Check whether each row contains the same number of columns as there
# are headers.
for count, row in enumerate(csv_data, start=1):
string_field_count = 0
for field in row:
if (row[field] is not None):
string_field_count += 1
if len(csv_column_headers) > string_field_count:
logging.error("Row %s of your CSV file does not " +
"have same number of columns (%s) as there are headers " +
"(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
sys.exit("Error: Row " + str(count) + " of your CSV file " +
"does not have same number of columns (" + str(string_field_count) +
") as there are headers (" + str(len(csv_column_headers)) + ").")
if len(csv_column_headers) < string_field_count:
logging.error("Row %s of your CSV file has more columns than there are headers " +
"(%s).", str(count), str(string_field_count), str(len(csv_column_headers)))
sys.exit("Error: Row " + str(count) + " of your CSV file " +
"has more columns than there are headers (" + str(len(csv_column_headers)) + ").")
message = "OK, all " + str(count) + " rows in the CSV file have the same number of columns as there are headers (" + str(len(csv_column_headers)) + ")."
print(message)
logging.info(message)
# Task-specific CSV checks.
langcode_was_present = False
if config['task'] == 'create':
field_definitions = get_field_definitions(config)
if config['id_field'] not in csv_column_headers:
message = 'For "create" tasks, your CSV file must have a column containing a unique identifier.'
logging.error(message)
sys.exit('Error: ' + message)
if 'file' not in csv_column_headers and config['paged_content_from_directories'] is False:
message = 'For "create" tasks, your CSV file must contain a "file" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'title' not in csv_column_headers:
message = 'For "create" tasks, your CSV file must contain a "title" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'output_csv' in config.keys():
if os.path.exists(config['output_csv']):
message = 'Output CSV already exists at ' + config['output_csv'] + ', records will be appended to it.'
print(message)
logging.info(message)
        # Specific to creating paged content. Currently, if 'parent_id' is present in the CSV file, 'field_weight' and 'field_member_of' must be present as well.
if 'parent_id' in csv_column_headers:
if ('field_weight' not in csv_column_headers or 'field_member_of' not in csv_column_headers):
message = 'If your CSV file contains a "parent_id" column, it must also contain "field_weight" and "field_member_of" columns.'
logging.error(message)
sys.exit('Error: ' + message)
drupal_fieldnames = []
for drupal_fieldname in field_definitions:
drupal_fieldnames.append(drupal_fieldname)
if len(drupal_fieldnames) == 0:
message = "Can't retrieve field definitions from Drupal. Please confirm that the JSON:API module is enabled."
logging.error(message)
sys.exit('Error: ' + message)
# We .remove() CSV column headers for this check because they are not Drupal field names (including 'langcode').
# Any new columns introduced into the CSV need to be removed here.
if config['id_field'] in csv_column_headers:
csv_column_headers.remove(config['id_field'])
if 'file' in csv_column_headers:
csv_column_headers.remove('file')
if 'node_id' in csv_column_headers:
csv_column_headers.remove('node_id')
if 'parent_id' in csv_column_headers:
csv_column_headers.remove('parent_id')
# langcode is a standard Drupal field but it doesn't show up in any field configs.
if 'langcode' in csv_column_headers:
csv_column_headers.remove('langcode')
# Set this so we can validate langcode below.
langcode_was_present = True
for csv_column_header in csv_column_headers:
if csv_column_header not in drupal_fieldnames:
logging.error("CSV column header %s does not appear to match any Drupal field names.", csv_column_header)
sys.exit('Error: CSV column header "' + csv_column_header + '" does not appear to match any Drupal field names.')
message = 'OK, CSV column headers match Drupal field names.'
print(message)
logging.info(message)
# Check that Drupal fields that are required are in the CSV file (create task only).
if config['task'] == 'create':
required_drupal_fields = []
for drupal_fieldname in field_definitions:
# In the create task, we only check for required fields that apply to nodes.
if 'entity_type' in field_definitions[drupal_fieldname] and field_definitions[drupal_fieldname]['entity_type'] == 'node':
if 'required' in field_definitions[drupal_fieldname] and field_definitions[drupal_fieldname]['required'] is True:
required_drupal_fields.append(drupal_fieldname)
for required_drupal_field in required_drupal_fields:
if required_drupal_field not in csv_column_headers:
logging.error("Required Drupal field %s is not present in the CSV file.", required_drupal_field)
sys.exit('Error: Required Drupal field "' + required_drupal_field + '" is not present in the CSV file.')
message = 'OK, required Drupal fields are present in the CSV file.'
print(message)
logging.info(message)
if config['task'] == 'update':
if 'node_id' not in csv_column_headers:
message = 'For "update" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
field_definitions = get_field_definitions(config)
drupal_fieldnames = []
for drupal_fieldname in field_definitions:
drupal_fieldnames.append(drupal_fieldname)
if 'title' in csv_column_headers:
csv_column_headers.remove('title')
if 'file' in csv_column_headers:
message = 'Error: CSV column header "file" is not allowed in update tasks.'
logging.error(message)
sys.exit(message)
if 'node_id' in csv_column_headers:
csv_column_headers.remove('node_id')
for csv_column_header in csv_column_headers:
if csv_column_header not in drupal_fieldnames:
logging.error('CSV column header %s does not appear to match any Drupal field names.', csv_column_header)
sys.exit('Error: CSV column header "' + csv_column_header + '" does not appear to match any Drupal field names.')
message = 'OK, CSV column headers match Drupal field names.'
print(message)
logging.info(message)
if config['task'] == 'update' or config['task'] == 'create':
# Validate values in fields | |
else 75,
nameScale=0.8 if isFirst else 1.0,
flatness=0.0 if isFirst else 1.0,
shadow=0.5 if isFirst else 1.0,
showDeath=True if isFirst else False,
showLives=False))
x += xOffs * (0.8 if isFirst else 0.56)
isFirst = False
testLives += 1
# non-solo mode
else:
for team in self.teams:
if team.getID() == 0:
x = -50
xOffs = -85
else:
x = 50
xOffs = 85
for player in team.players:
for icon in player.gameData['icons']:
icon.setPositionAndScale((x,30),0.7)
icon.updateForLives()
x += xOffs
def _getSpawnPoint(self,player):
# in solo-mode, if there's an existing live player on the map, spawn at whichever
# spot is farthest from them (keeps the action spread out)
if self._soloMode:
livingPlayer = None
for team in self.teams:
for player in team.players:
if player.isAlive():
p = player.actor.node.position
livingPlayer = player
livingPlayerPos = p
break
if livingPlayer:
playerPos = bs.Vector(*livingPlayerPos)
points = []
for team in self.teams:
startPos = bs.Vector(*self.getMap().getStartPosition(team.getID()))
points.append([(startPos-playerPos).length(),startPos])
points.sort()
return points[-1][1]
else:
return None
else:
return None
def spawnPlayer(self,player):
"""This next line is the default spawn line. But we need to spawn our special guy"""
#self.spawnPlayerSpaz(player,self._getSpawnPoint(player))
#position = self._getSpawnPoint(player)
#if isinstance(self.getSession(), bs.TeamsSession):
# position = self.getMap().getStartPosition(player.getTeam().getID())
#else:
# # otherwise do free-for-all spawn locations
position = self.getMap().getFFAStartPosition(self.players)
angle = 20
#spaz = self.spawnPlayerSpaz(player)
# lets reconnect this player's controls to this
# spaz but *without* the ability to attack or pick stuff up
#spaz.connectControlsToPlayer(enablePunch=False,
# enableBomb=False,
# enablePickUp=False)
# also lets have them make some noise when they die..
#spaz.playBigDeathSound = True
name = player.getName()
lightColor = bsUtils.getNormalizedColor(player.color)
displayColor = bs.getSafeColor(player.color, targetIntensity=0.75)
spaz = PlayerSpaz_Zom(color=player.color,
highlight=player.highlight,
character=player.character,
player=player)
player.setActor(spaz)
#For some reason, I can't figure out how to get a list of all spaz.
#Therefore, I am making the list here so I can get which spaz belongs
#to the player supplied by HitMessage.
self.spazList.append(spaz)
# we want a bigger area-of-interest in co-op mode
# if isinstance(self.getSession(),bs.CoopSession): spaz.node.areaOfInterestRadius = 5.0
# else: spaz.node.areaOfInterestRadius = 5.0
# if this is co-op and we're on Courtyard or Runaround, add the material that allows us to
# collide with the player-walls
# FIXME; need to generalize this
if isinstance(self.getSession(), bs.CoopSession) and self.getMap().getName() in ['Courtyard', 'Tower D']:
mat = self.getMap().preloadData['collideWithWallMaterial']
spaz.node.materials += (mat,)
spaz.node.rollerMaterials += (mat,)
spaz.node.name = name
spaz.node.nameColor = displayColor
        spaz.connectControlsToPlayer() # Unfortunately, I can't figure out how to prevent picking up other players while still allowing other pickups.
factory = spaz.getFactory()
self.scoreSet.playerGotNewSpaz(player, spaz)
# move to the stand position and add a flash of light
spaz.handleMessage(bs.StandMessage(position, angle if angle is not None else random.uniform(0, 360)))
t = bs.getGameTime()
bs.playSound(self._spawnSound, 1, position=spaz.node.position)
light = bs.newNode('light', attrs={'color': lightColor})
spaz.node.connectAttr('position', light, 'position')
bsUtils.animate(light, 'intensity', {0: 0, 250: 1, 500: 0})
bs.gameTimer(500, light.delete)
#Start code to spawn special guy:
#End of code to spawn special guy
if not self._soloMode:
bs.gameTimer(300,bs.Call(self._printLives,player))
# if we have any icons, update their state
for icon in player.gameData['icons']:
icon.handlePlayerSpawned()
def respawnPlayerZombie(self,player,respawnTime=None):
"""
Given a bs.Player, sets up a standard respawn timer,
along with the standard counter display, etc.
        At the end of the respawn period spawnPlayerZombie() will
        be called if the Player still exists.
An explicit 'respawnTime' can optionally be provided
(in milliseconds).
"""
if player is None or not player.exists():
if player is None: bs.printError('None passed as player to respawnPlayer()')
            else: bs.printError('Nonexistent bs.Player passed to respawnPlayer(); call player.exists() to make sure a player is still there.')
return
if player.getTeam() is None:
bs.printError('player has no team in respawnPlayer()')
return
if respawnTime is None:
if len(player.getTeam().players) == 1: respawnTime = 3000
elif len(player.getTeam().players) == 2: respawnTime = 5000
elif len(player.getTeam().players) == 3: respawnTime = 6000
else: respawnTime = 7000
# if this standard setting is present, factor it in
if 'Respawn Times' in self.settings: respawnTime *= self.settings['Respawn Times']
respawnTime = int(max(1000,respawnTime))
if respawnTime%1000 != 0: respawnTime -= respawnTime%1000 # we want whole seconds
if player.actor and not self.hasEnded():
import bsSpaz
player.gameData['respawnTimer'] = bs.Timer(respawnTime,bs.WeakCall(self.spawnPlayerIfExistsAsZombie,player))
player.gameData['respawnIcon'] = bsSpaz.RespawnIcon(player,respawnTime)
def spawnPlayerIfExistsAsZombie(self,player):
"""
        A utility method which calls self.spawnPlayerZombie() *only* if the bs.Player
        provided still exists; handy for use in timers and whatnot.
        There is no need to override this; just override spawnPlayerZombie().
"""
if player.exists(): self.spawnPlayerZombie(player)
def spawnPlayerZombie(self,player):
position = self.getMap().getFFAStartPosition(self.players)
angle = 20
name = player.getName()
lightColor = bsUtils.getNormalizedColor(player.color)
displayColor = bs.getSafeColor(player.color, targetIntensity=0.75)
spaz = PlayerZombie(color=player.color,
highlight=player.highlight,
character='Kronk2',
player=player)
player.setActor(spaz)
#For some reason, I can't figure out how to get a list of all spaz.
#Therefore, I am making the list here so I can get which spaz belongs
#to the player supplied by HitMessage.
self.spazList.append(spaz)
# we want a bigger area-of-interest in co-op mode
# if isinstance(self.getSession(),bs.CoopSession): spaz.node.areaOfInterestRadius = 5.0
# else: spaz.node.areaOfInterestRadius = 5.0
# if this is co-op and we're on Courtyard or Runaround, add the material that allows us to
# collide with the player-walls
# FIXME; need to generalize this
if isinstance(self.getSession(), bs.CoopSession) and self.getMap().getName() in ['Courtyard', 'Tower D']:
mat = self.getMap().preloadData['collideWithWallMaterial']
spaz.node.materials += (mat,)
spaz.node.rollerMaterials += (mat,)
#Need to prevent picking up powerups:
pam = bs.Powerup.getFactory().powerupAcceptMaterial
for attr in ['materials','rollerMaterials','extrasMaterials']:
materials = getattr(spaz.node,attr)
if pam in materials:
setattr(spaz.node,attr,tuple(m for m in materials if m != pam))
#spaz.node.materials.remove(pam)
#spaz.node.rollerMaterials.remove(pam)
#spaz.node.extrasMaterials.remove(pam)
spaz.node.name = name
spaz.node.nameColor = displayColor
spaz.connectControlsToPlayer(enablePunch=True,
enableBomb=False,
                                     enablePickUp=False) # Unfortunately, I can't figure out how to prevent picking up other players while still allowing other pickups.
self.scoreSet.playerGotNewSpaz(player, spaz)
# move to the stand position and add a flash of light
spaz.handleMessage(bs.StandMessage(position, angle if angle is not None else random.uniform(0, 360)))
t = bs.getGameTime()
bs.playSound(self._spawnSound, 1, position=spaz.node.position)
light = bs.newNode('light', attrs={'color': lightColor})
spaz.node.connectAttr('position', light, 'position')
bsUtils.animate(light, 'intensity', {0: 0, 250: 1, 500: 0})
bs.gameTimer(500, light.delete)
#Start code to spawn special guy:
#End of code to spawn special guy
if not self._soloMode:
bs.gameTimer(300,bs.Call(self._printLives,player))
# if we have any icons, update their state
for icon in player.gameData['icons']:
icon.handlePlayerSpawned()
def _printLives(self,player):
if not player.exists() or not player.isAlive(): return
try: pos = player.actor.node.position
except Exception,e:
print 'EXC getting player pos in bsElim',e
return
if player.gameData['lives'] > 0:
bs.PopupText('x'+str(player.gameData['lives']-1),color=(1,1,0,1),
offset=(0,-0.8,0),randomOffset=0.0,scale=1.8,position=pos).autoRetain()
else:
bs.PopupText('Dead!',color=(1,1,0,1),
offset=(0,-0.8,0),randomOffset=0.0,scale=1.8,position=pos).autoRetain()
def onPlayerLeave(self,player):
bs.TeamGameActivity.onPlayerLeave(self,player)
player.gameData['icons'] = None
# remove us from spawn-order
if self._soloMode:
if player in player.getTeam().gameData['spawnOrder']:
player.getTeam().gameData['spawnOrder'].remove(player)
# update icons in a moment since our team will be gone from the list then
bs.gameTimer(0, self._updateIcons)
def onBegin(self):
bs.TeamGameActivity.onBegin(self)
self.setupStandardTimeLimit(self.settings['Time Limit'])
self.setupStandardPowerupDrops()
self.zombieQ = 1 # queue of zombies to spawn. this will increment/decrement
if self._soloMode:
self._vsText = bs.NodeActor(bs.newNode("text",
attrs={'position':(0,105),
'hAttach':"center",
'hAlign':'center',
'maxWidth':200,
'shadow':0.5,
'vrDepth':390,
'scale':0.6,
'vAttach':"bottom",
'color':(0.8,0.8,0.3,1.0),
'text':bs.Lstr(resource='vsText')}))
# if balance-team-lives is on, add lives to the smaller team until total lives match
if (isinstance(self.getSession(),bs.TeamsSession)
and self.settings['Balance Total Lives']
and len(self.teams[0].players) > 0
and len(self.teams[1].players) > 0):
if self._getTotalTeamLives(self.teams[0]) < self._getTotalTeamLives(self.teams[1]):
lesserTeam = self.teams[0]
greaterTeam = self.teams[1]
else:
lesserTeam = self.teams[1]
greaterTeam = self.teams[0]
addIndex = 0
while self._getTotalTeamLives(lesserTeam) < self._getTotalTeamLives(greaterTeam):
lesserTeam.players[addIndex].gameData['lives'] += 1
addIndex = (addIndex + 1) % len(lesserTeam.players)
#Let's add a couple of bots
# this wrangles our bots
self._bots = zBotSet()
#Set colors and character for ToughGuyBot to be zombie
setattr(bs.ToughGuyBot, 'color', (0.4,0.1,0.05))
setattr(bs.ToughGuyBot, 'highlight', (0.2,0.4,0.3))
setattr(bs.ToughGuyBot, 'character', 'Kronk2')
# start some timers to spawn bots
thePt = self.getMap().getFFAStartPosition(self.players)
#bs.gameTimer(1000,bs.Call(self._bots.spawnBot,bs.ToughGuyBot,pos=thePt,spawnTime=3000))
self._updateIcons()
        self._updateScoreBoard()
# we could check game-over conditions at explicit trigger points,
# but lets just do the simple thing and poll it...
bs.gameTimer(1000, self._update, repeat=True)
def _getTotalTeamLives(self,team):
return sum(player.gameData['lives'] for player in team.players)
def handleMessage(self,m):
if isinstance(m,bs.PlayerSpazDeathMessage):
bs.TeamGameActivity.handleMessage(self, m) # augment standard behavior
player = m.spaz.getPlayer()
#print([player, m.spaz.hitPoints, "killed by", m.killerPlayer])
if player.gameData['lives'] > 0: #Dying player was not zombie. Remove a life
player.gameData['lives'] -= 1
else: #Dying player was a zombie. Give points to killer
if m.killerPlayer.exists():
if m.killerPlayer.gameData['lives'] > 0:
m.killerPlayer.getTeam().gameData['score'] += 2
self._updateScoreBoard()
#Remove this spaz from the list of active spazzes
if m.spaz in self.spazList: self.spazList.remove(m.spaz)
if player.gameData['lives'] < 0:
                bs.printError("Got lives < 0 in Elim; this shouldn't happen. solo:"+str(self._soloMode))
player.gameData['lives'] = 0
# if we have any icons, update their state
for | |
<reponame>nukui-s/mlens
"""ML-ENSEMBLE
:author: <NAME>
:copyright: 2017-2018
:licence: MIT
Blend Ensemble class. Fully integrable with Scikit-learn.
"""
from __future__ import division
from .base import BaseEnsemble
from ..index import BlendIndex, FullIndex
class BlendEnsemble(BaseEnsemble):
r"""Blend Ensemble class.
The Blend Ensemble is a supervised ensemble closely related to
the :class:`SuperLearner`. It differs in that to estimate the prediction
matrix Z used by the meta learner, it uses a subset of the data to predict
its complement, and the meta learner is fitted on those predictions.
By only fitting every base learner once on a subset
of the full training data, :class:`BlendEnsemble` is a fast ensemble
that can handle very large datasets simply by only using portion of it at
each stage. The cost of this approach is that information is thrown out
at each stage, as one layer will not see the training data used by the
previous layer.
With large data that can be expected to satisfy an i.i.d. assumption, the
:class:`BlendEnsemble` can achieve similar performance to more
sophisticated ensembles at a fraction of the training time. However, with
    data that is not uniformly distributed or that exhibits high variance, the
    :class:`BlendEnsemble` can be a poor choice as information is lost at
each stage of fitting.
See Also
--------
:class:`SuperLearner`, :class:`Subsemble`
    .. note :: All parameters can be overridden in the :attr:`add` method unless
otherwise specified. Notably, the ``backend`` and ``n_jobs`` cannot
be altered in the :attr:`add` method.
Parameters
----------
test_size : int, float (default = 0.5)
the size of the test set for each layer. This parameter can be
        overridden in the :attr:`add` method if a different test size is desired
for each layer. If a ``float`` is specified, it is presumed to be the
fraction of the available data to be used for training, and so
``0. < test_size < 1.``.
shuffle : bool (default = False)
        whether to shuffle data before processing each layer. This
        parameter can be overridden in the :attr:`add` method if a different
        setting is desired for each layer.
random_state : int (default = None)
random seed for shuffling inputs. Note that the seed here is used to
generate a unique seed for each layer. Can be overridden in the
:attr:`add` method.
scorer : object (default = None)
scoring function. If a function is provided, base estimators will be
scored on the prediction made. The scorer should be a function that
accepts an array of true values and an array of predictions:
``score = f(y_true, y_pred)``. Can be overridden in the :attr:`add` method.
raise_on_exception : bool (default = True)
whether to issue warnings on soft exceptions or raise error.
Examples include lack of layers, bad inputs, and failed fit of an
estimator in a layer. If set to ``False``, warnings are issued instead
but estimation continues unless exception is fatal. Note that this
can result in unexpected behavior unless the exception is anticipated.
array_check : int (default = 2)
level of strictness in checking input arrays.
- ``array_check = 0`` will not check ``X`` or ``y``
- ``array_check = 1`` will check ``X`` and ``y`` for
inconsistencies and warn when format looks suspicious,
but retain original format.
- ``array_check = 2`` will impose Scikit-learn array checks,
which converts ``X`` and ``y`` to numpy arrays and raises
an error if conversion fails.
verbose : int or bool (default = False)
level of verbosity.
* ``verbose = 0`` silent (same as ``verbose = False``)
* ``verbose = 1`` messages at start and finish (same as
``verbose = True``)
* ``verbose = 2`` messages for each layer
If ``verbose >= 50`` prints to ``sys.stdout``, else ``sys.stderr``.
For verbosity in the layers themselves, use ``fit_params``.
n_jobs : int (default = -1)
Degree of parallel processing. Set to -1 for maximum parallelism and
        1 for sequential processing. Cannot be overridden in the :attr:`add` method.
backend : str or object (default = 'threading')
backend infrastructure to use during call to
:class:`mlens.externals.joblib.Parallel`. See Joblib for further
documentation. To set global backend, set ``mlens.config._BACKEND``.
        Cannot be overridden in the :attr:`add` method.
model_selection: bool (default=False)
Whether to use the ensemble in model selection mode. If ``True``,
this will alter the ``transform`` method. When calling ``transform``
on new data, the ensemble will call ``predict``, while calling
``transform`` with the training data reproduces predictions from the
``fit`` call. Hence the ensemble can be used as a pure transformer
in a preprocessing pipeline passed to the :class:`Evaluator`, as
        training folds are faithfully reproduced as during a ``fit`` call and
test folds are transformed with the ``predict`` method.
sample_size: int (default=20)
size of training set sample
(``[min(sample_size, X.size[0]), min(X.size[1], sample_size)]``)
Examples
--------
Instantiate ensembles with no preprocessing: use list of estimators
>>> from mlens.ensemble import BlendEnsemble
>>> from mlens.metrics.metrics import rmse
>>> from sklearn.datasets import load_boston
>>> from sklearn.linear_model import Lasso
>>> from sklearn.svm import SVR
>>>
>>> X, y = load_boston(True)
>>>
>>> ensemble = BlendEnsemble()
>>> ensemble.add([SVR(), ('can name some or all est', Lasso())])
>>> ensemble.add_meta(SVR())
>>>
>>> ensemble.fit(X, y)
>>> preds = ensemble.predict(X)
>>> rmse(y, preds)
7.3337...
Instantiate ensembles with different preprocessing pipelines through dicts.
>>> from mlens.ensemble import BlendEnsemble
>>> from mlens.metrics.metrics import rmse
>>> from sklearn.datasets import load_boston
    >>> from sklearn.preprocessing import MinMaxScaler, StandardScaler
>>> from sklearn.linear_model import Lasso
>>> from sklearn.svm import SVR
>>>
>>> X, y = load_boston(True)
>>>
>>> preprocessing_cases = {'mm': [MinMaxScaler()],
... 'sc': [StandardScaler()]}
>>>
>>> estimators_per_case = {'mm': [SVR()],
... 'sc': [('can name some or all ests', Lasso())]}
>>>
>>> ensemble = BlendEnsemble()
>>> ensemble.add(estimators_per_case, preprocessing_cases).add(SVR(),
... meta=True)
>>>
>>> ensemble.fit(X, y)
>>> preds = ensemble.predict(X)
>>> rmse(y, preds)
8.249013
"""
def __init__(
self, test_size=0.5, shuffle=False, random_state=None, scorer=None,
raise_on_exception=True, array_check=2, verbose=False, n_jobs=-1,
backend=None, model_selection=False, sample_size=20, layers=None):
super(BlendEnsemble, self).__init__(
shuffle=shuffle, random_state=random_state, scorer=scorer,
raise_on_exception=raise_on_exception, array_check=array_check,
verbose=verbose, n_jobs=n_jobs, model_selection=model_selection,
sample_size=sample_size, layers=layers, backend=backend)
self.__initialized__ = 0 # Unlock parameter setting
self.test_size = test_size
self.__initialized__ = 1 # Protect against param resets
def add_meta(self, estimator, **kwargs):
"""Meta Learner.
Compatibility method for adding a meta learner to be used for final
predictions.
Parameters
----------
estimator : instance
estimator instance.
**kwargs : optional
optional keyword arguments.
"""
return self.add(estimators=estimator, meta=True, **kwargs)
def add(self, estimators, preprocessing=None,
proba=False, meta=False, propagate_features=None, **kwargs):
"""Add layer to ensemble.
Parameters
----------
preprocessing: dict of lists or list, optional (default = None)
preprocessing pipelines for given layer. If
the same preprocessing applies to all estimators, ``preprocessing``
should be a list of transformer instances. The list can contain the
instances directly, named tuples of transformers,
or a combination of both. ::
option_1 = [transformer_1, transformer_2]
option_2 = [("trans-1", transformer_1),
("trans-2", transformer_2)]
option_3 = [transformer_1, ("trans-2", transformer_2)]
If different preprocessing pipelines are desired, a dictionary
that maps preprocessing pipelines must be passed. The names of the
preprocessing dictionary must correspond to the names of the
estimator dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
estimators: dict of lists or list or instance
estimators constituting the layer. If preprocessing is none and the
layer is meant to be the meta estimator, it is permissible to pass
a single instantiated estimator. If ``preprocessing`` is
``None`` or ``list``, ``estimators`` should be a ``list``.
The list can either contain estimator instances,
named tuples of estimator instances, or a combination of both. ::
option_1 = [estimator_1, estimator_2]
option_2 = [("est-1", estimator_1), ("est-2", estimator_2)]
option_3 = [estimator_1, ("est-2", estimator_2)]
If different preprocessing pipelines are desired, a dictionary
that maps estimators to preprocessing pipelines must be passed.
The names of the estimator dictionary must correspond to the
            names of the preprocessing dictionary. ::
preprocessing_cases = {"case-1": [trans_1, trans_2],
"case-2": [alt_trans_1, alt_trans_2]}
estimators = {"case-1": [est_a, est_b],
"case-2": [est_c, est_d]}
The lists for each dictionary entry can be any of ``option_1``,
``option_2`` and ``option_3``.
proba : bool (default = False)
| |
<filename>dbca/dbca_splitter.py
from typing import List, Tuple
import logging
import numpy as np
from tqdm import tqdm
import time
import json
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from dbca.sample import Sample
from dbca.sample_set import SampleSet
from dbca.full_sample_set import FullSampleSet
from dbca.split_sample_set import SplitSampleSet
from dbca.storage import SampleStore
from dbca.config import DBCASplitterConfig
from dbca.dist_funcs import chernoff_divergence
from dbca.freq_distribution import FrequencyDistribution
logger = logging.getLogger(__name__)
def get_weights_df(sample_set: SampleSet, dist_type: str = "compound") -> pd.DataFrame:
"""
Return DataFrame of atom/compound weights in a sample set.
Args:
sample_set (SampleSet): Sample set from which to extract weights.
dist_type (str, optional): "atom" or "compound" to designate which weights to extract. Defaults to "compound".
Returns:
pd.DataFrame: DataFrame of atom/compound weights in a sample set.
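    Example (a sketch, assuming ``splitter`` is a DBCASplitter with generated
    splits; this mirrors how get_merged_weights_df uses the function below)::
        top_compounds = get_weights_df(splitter.train_set, dist_type="compound").head(20)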
"""
assert(dist_type in ["compound", "atom"])
weights = sample_set.compound_weights if dist_type == "compound" else sample_set.atom_weights
df = pd.DataFrame([{"name": k, "weight": v} for k,v in weights.items()])
df = df.set_index('name')
df.sort_values(by="weight", ascending=False, inplace=True)
return df
class DBCASplitter:
"""
Processes a sample set to generate training and test splits according to
specified compositionality settings, which are controlled by:
* D_A: target atom divergence
* D_C: target compound divergence
A compositional generalization setting features high compound divergence
and low atom divergence.
For full detail of generation settings, see DBCASplitterConfig.
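    A minimal usage sketch (how the Sample list is built and the exact
    DBCASplitterConfig arguments are assumptions for illustration)::
        samples = load_my_samples()          # -> List[Sample], defined elsewhere
        splitter = DBCASplitter(samples, DBCASplitterConfig())
        splitter.generate_splits()           # writes split info to config.save_dir
        print(splitter.atom_divergence, splitter.compound_divergence)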
"""
def __init__(self, samples: List[Sample], config: DBCASplitterConfig = None):
"""
Create new DBCASplitter.
Args:
samples (List[Sample]): Full set of samples to create splits from.
config (DBCASplitterConfig, optional): Optional settings for split generation.
"""
        self.config = config if config else DBCASplitterConfig()
        self.sample_store = SampleStore(samples)
        self.full_sample_set = FullSampleSet(sample_store=self.sample_store,
                                             workers=self.config.num_loader_processes)
        self.sample_splits = {s_id: None for s_id in self.full_sample_set.sample_ids}
        self.unused_sample_ids = set(self.sample_splits.keys())
self.logger = logging.getLogger(__name__)
self.train_set = SplitSampleSet(split="train")
self.test_set = SplitSampleSet(split="test")
# set seed for reproduceability
np.random.seed(self.config.seed)
def init_build(self):
self.logger.info(f"Creating output directory at {self.config.save_dir}...")
self.config.save_dir_path.mkdir(parents=True, exist_ok=True)
self.save_config(self.config.save_dir_path)
@property
def step(self) -> int:
"""
Current step of generation.
"""
return self.train_set.size + self.test_set.size
@property
def total_steps(self) -> int:
"""
Return total number of steps needed for generation of splits.
"""
return self.config.n_train + self.config.n_test
@property
def steps_left(self) -> int:
"""
Return number of steps left for generation process.
"""
return self.total_steps - self.step
@property
def build_done(self) -> bool:
"""
Return True if build complete, False o.w.
"""
return ((self.train_set.size == self.config.n_train) and
(self.test_set.size == self.config.n_test))
@property
def target_train_test_ratio(self):
"""
Return target desired ratio between train and test set size.
"""
return self.config.train_test_ratio
@property
def curr_train_test_ratio(self):
"""
Return current ratio between train and test set size.
"""
if self.train_set.size == 0:
return 0
if self.test_set.size == 0:
return np.inf
return self.train_set.size / self.test_set.size
@property
def train_step(self) -> bool:
""" Return True if current step should add sample to training set and False o.w. (should add to test set)"""
if (self.train_set.size % self.target_train_test_ratio != 0):
return True
elif self.curr_train_test_ratio <= self.target_train_test_ratio:
return True
else:
return False
def get_sample(self, sample_id: str) -> Sample:
"""
Return sample with id `sample_id`.
"""
return self.sample_store.sample_by_id(sample_id)
def calc_atom_divergence(self, dist_a: FrequencyDistribution,
dist_b: FrequencyDistribution) -> float:
return chernoff_divergence(dist_a, dist_b, alpha=self.config.alpha_a)
def calc_compound_divergence(self, dist_a: FrequencyDistribution,
dist_b: FrequencyDistribution) -> float:
return chernoff_divergence(dist_a, dist_b, alpha=self.config.alpha_c)
@property
    def atom_divergence(self) -> float:
"""
Returns
-------
float
            Atom divergence between the current train and test splits.
"""
return self.calc_atom_divergence(self.train_set.atom_distribution,
self.test_set.atom_distribution)
@property
def compound_divergence(self) -> float:
"""
Returns
-------
float
            Compound divergence between the current train and test splits.
"""
return self.calc_compound_divergence(self.train_set.compound_distribution,
self.test_set.compound_distribution)
def score(self, train_a_dist: FrequencyDistribution,
test_a_dist: FrequencyDistribution,
train_c_dist: FrequencyDistribution,
test_c_dist: FrequencyDistribution) -> float:
"""
        Calculate the greedy selection score for the input train/test splits:
        the absolute deviation of the compound divergence from the target D_C,
        plus the atom divergence when it exceeds the D_A threshold (lower is better).
Parameters
----------
train_a_dist : FrequencyDistribution
Train atom distribution.
test_a_dist : FrequencyDistribution
Test atom distribution.
train_c_dist : FrequencyDistribution
Train compound distribution.
test_c_dist : FrequencyDistribution
Test compound distribution.
Returns
-------
float
Score of input train/test splits (in [0,1]).
"""
atom_score = self.calc_atom_divergence(train_a_dist,
test_a_dist)
# exact atom divergence less important than it being below maximal threshold
if atom_score < self.config.D_A:
atom_score = 0
compound_score = np.abs(self.calc_compound_divergence(train_c_dist,
test_c_dist) - self.config.D_C)
return compound_score + atom_score
def find_best_sample(self, sample_set_to_update: SplitSampleSet,
other_sample_set: SplitSampleSet) -> str:
"""
Greedily select the best sample to add to `sample_set_to_update` w.r.t target
atom and compound divergence measures.
Parameters
----------
sample_set_to_update : SplitSampleSet
Sample set to be updated with chosen sample.
other_sample_set : SplitSampleSet
The other sample set (not updated).
Returns
-------
str
id of found sample.
"""
all_scores = []
self.logger.debug("Searching for new sample...")
for s_id in self.unused_sample_ids:
all_scores.append((s_id, self.peek_sample(s_id,
sample_set_to_update, other_sample_set)))
sorted_scores = sorted(all_scores, key=lambda x: (x[1], x[0]))
best_id, best_score = sorted_scores[0]
debug_infos = {"best_id": best_id,
"best_score": best_score,
"all_scores": all_scores}
return best_id, debug_infos
def peek_sample(self, sample_id: str, sample_set_to_update: SplitSampleSet,
other_sample_set: SplitSampleSet) -> float:
"""
Check score for adding sample `sample_id` to `sample_set_to_update` without actually
making the update (not in-place).
Args:
sample_id (str): id of sample to check update for.
sample_set_to_update (SplitSampleSet): Sample set to be updated with chosen sample.
other_sample_set (SplitSampleSet): The other sample set (not updated).
Returns:
float: Split score if we had added `sample_id` to `sample_set_to_update`
"""
a_dist, c_dist = sample_set_to_update.update(sample_id, self.full_sample_set,
inplace=False)
if sample_set_to_update.is_train:
train_a_dist = a_dist
train_c_dist = c_dist
test_a_dist = other_sample_set.atom_distribution
test_c_dist = other_sample_set.compound_distribution
else:
test_a_dist = a_dist
test_c_dist = c_dist
train_a_dist = other_sample_set.atom_distribution
train_c_dist = other_sample_set.compound_distribution
return self.score(train_a_dist, test_a_dist, train_c_dist, test_c_dist)
def add_sample_to_set(self, sample_id: str, split_sample_set: SplitSampleSet):
"""
Add new sample to sample set (update in-place).
"""
split = split_sample_set.split_type
self.sample_splits[sample_id] = split
self.unused_sample_ids.remove(sample_id)
split_sample_set.update(sample_id, full_sample_set=self.full_sample_set,
inplace=True)
def save_config(self, save_dir: Path):
"""
Save configuration file as JSON.
"""
config_path = save_dir / 'config.json'
config_path.write_text(self.config.to_json())
def save_samples(self, path: Path):
"""
Save actual splits to `save_path`.
"""
train_samples = ""
with open(f'{path.parent}/train_samples.txt', 'a') as train, open(f'{path.parent}/test_samples.txt', 'a') as test:
for s_id, split in self.sample_splits.items():
if split:
if split.value == "train":
train.write(self.sample_store._samples[s_id]._name)
train.write("\n")
elif split.value == "test":
test.write(self.sample_store._samples[s_id]._name)
test.write("\n")
else:
assert False
train.close()
test.close()
#json.dump(train_samples, (path.parent / (path.name + '-train.json')).open(mode="w"))
#json.dump(test_samples, (path.parent / (path.name + '-test.json')).open(mode="w"))
def save_splits(self, save_path: Path):
"""
Save split information to `save_path`.
"""
splits = {s_id: split.value for s_id, split in self.sample_splits.items() if split}
save_data = {"splits": splits,
"train_size": self.train_set.size,
"test_size": self.test_set.size,
"compound_divergence": self.compound_divergence,
"atom_divergence": self.atom_divergence
}
json.dump(save_data, save_path.open(mode="w"))
def end_iteration(self):
if self.config.save_progress:
self.save_splits(Path(self.config.save_dir) / "data.json")
def end_generation(self):
"""
End generation process.
"""
self.logger.info("Finished creating dataset!")
self.logger.info(f"Train set size: {self.train_set.size}, test set size: {self.test_set.size}")
self.logger.info(f"D_A: {self.atom_divergence}, D_C: {self.compound_divergence}")
self.logger.info(f"Saving to {self.config.save_dir}...")
self.save_splits(Path(self.config.save_dir) / "data.json")
self.save_samples(Path(self.config.save_dir) / "samples")
def generate_splits(self) -> Tuple[SplitSampleSet, SplitSampleSet]:
"""
        Greedily build the train split V and the test split W: at each iteration,
        a new sample u is selected such that the compound divergence D_C(V||W) and
        the atom divergence D_A(V||W) are kept as close as possible to the desired values.
Returns
-------
SplitSampleSet
Generated train set split.
SplitSampleSet
Generated test set split.
"""
self.init_build()
# add random init to initialize train set
chosen_sample_id = np.random.choice(list(self.unused_sample_ids))
self.logger.info(f"Choosing random first sample: {chosen_sample_id}...")
self.add_sample_to_set(chosen_sample_id, self.train_set)
# main split generation loop
self.logger.info("Starting to create splits...")
for i in tqdm(range(self.steps_left), total=self.steps_left):
tic = time.perf_counter()
if self.train_step:
best_id, debug_infos = self.find_best_sample(self.train_set, self.test_set)
self.add_sample_to_set(best_id, self.train_set)
split = self.train_set.split_type.value
else:
best_id, debug_infos = self.find_best_sample(self.test_set, self.train_set)
self.add_sample_to_set(best_id, self.test_set)
split = self.test_set.split_type.value
toc = time.perf_counter()
self.logger.debug(f"Found new {split} sample ({best_id})! Score: {debug_infos.get('best_score')}, Time: {toc - tic:0.4f}")
self.logger.debug(f"Train set size: {self.train_set.size}, test set size: {self.test_set.size}")
self.logger.debug(f"D_A: {self.atom_divergence}, D_C: {self.compound_divergence}")
self.end_iteration()
self.end_generation()
self.logger.info("Done!")
@classmethod
def measure_sample_sets(cls, train_set: List[Sample], test_set: List[Sample]):
"""
Measure atom and compound divergence between two existing sample sets.
Parameters
----------
train_set : List[Sample]
Train set samples.
test_set : List[Sample]
Test set samples.
Returns
-------
float:
atom divergence
float:
compound divergence
DBCASplitter:
DBCASplitter object containing full split details.
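        Example (a sketch; ``train_samples`` and ``test_samples`` are assumed
        to be lists of dbca.sample.Sample)::
            d_a, d_c, splitter = DBCASplitter.measure_sample_sets(
                train_samples, test_samples)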
"""
dbca_splitter = cls(train_set + test_set)
for sample in train_set:
dbca_splitter.add_sample_to_set(sample.id, dbca_splitter.train_set)
for sample in test_set:
dbca_splitter.add_sample_to_set(sample.id, dbca_splitter.test_set)
return dbca_splitter.atom_divergence, dbca_splitter.compound_divergence, dbca_splitter
def get_merged_weights_df(self, dist_type: str = "compound") -> pd.DataFrame:
tr_df = get_weights_df(self.train_set, dist_type=dist_type)
te_df = get_weights_df(self.test_set, dist_type=dist_type)
com_df = pd.concat((tr_df, te_df), axis=1)
com_df.columns = ["train", "test"]
com_df.fillna(0, | |
name_='MyDigiPassEidAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassEidAddress', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassEidAddress'):
if self.streetAndNumber is not None and 'streetAndNumber' not in already_processed:
already_processed.add('streetAndNumber')
outfile.write(' streetAndNumber=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.streetAndNumber), input_name='streetAndNumber')), ))
if self.zipCode is not None and 'zipCode' not in already_processed:
already_processed.add('zipCode')
outfile.write(' zipCode=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.zipCode), input_name='zipCode')), ))
if self.municipality is not None and 'municipality' not in already_processed:
already_processed.add('municipality')
outfile.write(' municipality=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.municipality), input_name='municipality')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassEidAddress', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('streetAndNumber', node)
if value is not None and 'streetAndNumber' not in already_processed:
already_processed.add('streetAndNumber')
self.streetAndNumber = value
value = find_attr_value_('zipCode', node)
if value is not None and 'zipCode' not in already_processed:
already_processed.add('zipCode')
self.zipCode = value
value = find_attr_value_('municipality', node)
if value is not None and 'municipality' not in already_processed:
already_processed.add('municipality')
self.municipality = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassEidAddress
class MyDigiPassProfile(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, updatedAt=None, firstName=None, lastName=None, bornOn=None, preferredLocale=None, uuid=None):
self.original_tagname_ = None
self.updatedAt = _cast(None, updatedAt)
self.firstName = _cast(None, firstName)
self.lastName = _cast(None, lastName)
self.bornOn = _cast(None, bornOn)
self.preferredLocale = _cast(None, preferredLocale)
self.uuid = _cast(None, uuid)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassProfile)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassProfile.subclass:
return MyDigiPassProfile.subclass(*args_, **kwargs_)
else:
return MyDigiPassProfile(*args_, **kwargs_)
factory = staticmethod(factory)
def get_updatedAt(self): return self.updatedAt
def set_updatedAt(self, updatedAt): self.updatedAt = updatedAt
def get_firstName(self): return self.firstName
def set_firstName(self, firstName): self.firstName = firstName
def get_lastName(self): return self.lastName
def set_lastName(self, lastName): self.lastName = lastName
def get_bornOn(self): return self.bornOn
def set_bornOn(self, bornOn): self.bornOn = bornOn
def get_preferredLocale(self): return self.preferredLocale
def set_preferredLocale(self, preferredLocale): self.preferredLocale = preferredLocale
def get_uuid(self): return self.uuid
def set_uuid(self, uuid): self.uuid = uuid
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassProfile', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassProfile')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassProfile')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassProfile', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassProfile'):
if self.updatedAt is not None and 'updatedAt' not in already_processed:
already_processed.add('updatedAt')
outfile.write(' updatedAt=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.updatedAt), input_name='updatedAt')), ))
if self.firstName is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
outfile.write(' firstName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.firstName), input_name='firstName')), ))
if self.lastName is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
outfile.write(' lastName=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.lastName), input_name='lastName')), ))
if self.bornOn is not None and 'bornOn' not in already_processed:
already_processed.add('bornOn')
outfile.write(' bornOn=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.bornOn), input_name='bornOn')), ))
if self.preferredLocale is not None and 'preferredLocale' not in already_processed:
already_processed.add('preferredLocale')
outfile.write(' preferredLocale=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.preferredLocale), input_name='preferredLocale')), ))
if self.uuid is not None and 'uuid' not in already_processed:
already_processed.add('uuid')
outfile.write(' uuid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.uuid), input_name='uuid')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassProfile', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('updatedAt', node)
if value is not None and 'updatedAt' not in already_processed:
already_processed.add('updatedAt')
self.updatedAt = value
value = find_attr_value_('firstName', node)
if value is not None and 'firstName' not in already_processed:
already_processed.add('firstName')
self.firstName = value
value = find_attr_value_('lastName', node)
if value is not None and 'lastName' not in already_processed:
already_processed.add('lastName')
self.lastName = value
value = find_attr_value_('bornOn', node)
if value is not None and 'bornOn' not in already_processed:
already_processed.add('bornOn')
self.bornOn = value
value = find_attr_value_('preferredLocale', node)
if value is not None and 'preferredLocale' not in already_processed:
already_processed.add('preferredLocale')
self.preferredLocale = value
value = find_attr_value_('uuid', node)
if value is not None and 'uuid' not in already_processed:
already_processed.add('uuid')
self.uuid = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassProfile
class MyDigiPassAddress(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, address1=None, address2=None, city=None, zip=None, country=None, state=None):
self.original_tagname_ = None
self.address1 = _cast(None, address1)
self.address2 = _cast(None, address2)
self.city = _cast(None, city)
self.zip = _cast(None, zip)
self.country = _cast(None, country)
self.state = _cast(None, state)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassAddress)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassAddress.subclass:
return MyDigiPassAddress.subclass(*args_, **kwargs_)
else:
return MyDigiPassAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_address1(self): return self.address1
def set_address1(self, address1): self.address1 = address1
def get_address2(self): return self.address2
def set_address2(self, address2): self.address2 = address2
def get_city(self): return self.city
def set_city(self, city): self.city = city
def get_zip(self): return self.zip
def set_zip(self, zip): self.zip = zip
def get_country(self): return self.country
def set_country(self, country): self.country = country
def get_state(self): return self.state
def set_state(self, state): self.state = state
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', name_='MyDigiPassAddress', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MyDigiPassAddress')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='MyDigiPassAddress')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='MyDigiPassAddress', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='MyDigiPassAddress'):
if self.address1 is not None and 'address1' not in already_processed:
already_processed.add('address1')
outfile.write(' address1=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.address1), input_name='address1')), ))
if self.address2 is not None and 'address2' not in already_processed:
already_processed.add('address2')
outfile.write(' address2=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.address2), input_name='address2')), ))
if self.city is not None and 'city' not in already_processed:
already_processed.add('city')
outfile.write(' city=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.city), input_name='city')), ))
if self.zip is not None and 'zip' not in already_processed:
already_processed.add('zip')
outfile.write(' zip=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.zip), input_name='zip')), ))
if self.country is not None and 'country' not in already_processed:
already_processed.add('country')
outfile.write(' country=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.country), input_name='country')), ))
if self.state is not None and 'state' not in already_processed:
already_processed.add('state')
outfile.write(' state=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.state), input_name='state')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='MyDigiPassAddress', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('address1', node)
if value is not None and 'address1' not in already_processed:
already_processed.add('address1')
self.address1 = value
value = find_attr_value_('address2', node)
if value is not None and 'address2' not in already_processed:
already_processed.add('address2')
self.address2 = value
value = find_attr_value_('city', node)
if value is not None and 'city' not in already_processed:
already_processed.add('city')
self.city = value
value = find_attr_value_('zip', node)
if value is not None and 'zip' not in already_processed:
already_processed.add('zip')
self.zip = value
value = find_attr_value_('country', node)
if value is not None and 'country' not in already_processed:
already_processed.add('country')
self.country = value
value = find_attr_value_('state', node)
if value is not None and 'state' not in already_processed:
already_processed.add('state')
self.state = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class MyDigiPassAddress
class MyDigiPassWidgetStep(WidgetStep):
subclass = None
superclass = WidgetStep
def __init__(self, id=None, creationTimestamp=None, definition=None, previousStep=None, nextStep=None, message=None, button=None, receivedTimestamp=None, acknowledgedTimestamp=None, displayValue=None, formButton=None, eidPhoto=None, email=None, phone=None, eidProfile=None, eidAddress=None, profile=None, address=None):
self.original_tagname_ = None
super(MyDigiPassWidgetStep, self).__init__(id, creationTimestamp, definition, previousStep, nextStep, message, button, receivedTimestamp, acknowledgedTimestamp, displayValue, formButton, )
self.eidPhoto = _cast(None, eidPhoto)
self.email = _cast(None, email)
self.phone = _cast(None, phone)
self.eidProfile = eidProfile
self.eidAddress = eidAddress
self.profile = profile
self.address = address
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MyDigiPassWidgetStep)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MyDigiPassWidgetStep.subclass:
return MyDigiPassWidgetStep.subclass(*args_, **kwargs_)
else:
return MyDigiPassWidgetStep(*args_, **kwargs_)
factory = staticmethod(factory)
def get_eidProfile(self): return self.eidProfile
def set_eidProfile(self, eidProfile): self.eidProfile = eidProfile
def get_eidAddress(self): return self.eidAddress
    def set_eidAddress(self, eidAddress): self.eidAddress = eidAddress
m21, Tam, Tp, Ts, Q = params
"""
Model with split, ancient migration, heterogenous effective population size (with 2 classes of loci shared by the two populations = Hill-Robertson effects)
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Tam: The scaled time between the split and the end of ancient migration.
    Ts: The scaled time between the end of ancient migration and present (in units of 2*Na generations).
    Tp: The scaled time of the ancestral population size change, before the split.
    nuA: The ancestral population size after the size change.
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
    #### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # We start the population size change after the split independently in each population and set the migration rates to zero
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phinr = dadi.Integration.two_pops(phinr, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tam, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
    # We start the population size change after the split independently (bnu{1,2}_func) & integrate the hrf for low-recombining regions in each population and set the migration rates to zero
bnu1hrf_func = lambda t: (nu1 * b1**(t/Ts)) * hrf
    bnu2hrf_func = lambda t: (nu2 * b2**(t/Ts)) * hrf
philr = dadi.Integration.two_pops(philr, xx, Ts, bnu1hrf_func, bnu2hrf_func, m12=0, m21=0)
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
#### Sum the two spectra
fs= (1-Q)*fsnr + Q*fslr
return fs
def AMA2mG(params, (n1,n2), pts):
nuA, nu1, nu2, b1, b2, m12, m21, me12, me21, Tam, Tp, Ts, P = params
"""
Model of semi permeability with split, ancient migration with 2 migration rates
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Tam: The scaled time between the split and the end of ancient migration (in units of 2*Na generations).
    Ts: The scaled time between the end of ancient migration and present.
    Tp: The scaled time of the ancestral population size change, before the split.
    nuA: The ancestral population size after the size change.
    P: The proportion of the genome evolving neutrally
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes nu1 & nu2 after the split and set the migration rate to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # The population sizes then change independently in each population (bnu1_func, bnu2_func) and the migration rates are set to zero
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phiN = dadi.Integration.two_pops(phiN, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes nu1 & nu2 after the split and set the migration rate to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=me12, m21=me21)
    # The population sizes then change independently in each population (bnu1_func, bnu2_func) and the migration rates are set to zero
    bnu1_func = lambda t: nu1 * b1**(t/Ts)
    bnu2_func = lambda t: nu2 * b2**(t/Ts)
    phiI = dadi.Integration.two_pops(phiI, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
## calculate the spectrum.
fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
### Sum the two spectra in proportion O (and P)
fs = P*fsN + (1-P)*fsI
return fs
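# Hedged usage sketch (not part of the original script): how a model function such as
# AMA2mG is typically wrapped and evaluated with dadi. The grid sizes and parameter
# values below are placeholders chosen for illustration only.
def _example_evaluate_AMA2mG(fs_data):
    pts_l = [40, 50, 60]                                  # extrapolation grid sizes
    func_ex = dadi.Numerics.make_extrap_log_func(AMA2mG)  # extrapolating wrapper
    # params: nuA, nu1, nu2, b1, b2, m12, m21, me12, me21, Tam, Tp, Ts, P
    p0 = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 0.1, 0.5, 0.1, 1.0, 0.5]
    model = func_ex(p0, fs_data.sample_sizes, pts_l)
    return dadi.Inference.ll_multinom(model, fs_data)     # composite log-likelihood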
def AMA2N2mG(params, (n1,n2), pts):
nuA, nu1, nu2, b1, b2, hrf, m12, m21, me12, me21, Tam, Tp, Ts, P, Q = params
"""
Model of semi permeability with split, ancient migration with 2 migration rates, heterogenous effective population size (2 classes, shared by the two populations = background selection)
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
    Tam: The scaled time between the split and the end of ancient migration (in units of 2*Na generations).
    Ts: The scaled time between the end of ancient migration and present.
    Tp: The scaled time of the ancestral population size change, before the split.
    nuA: The ancestral population size after the size change.
P: The proportion of the genome evolving neutrally
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
    # We set the population sizes nu1 & nu2 after the split and set the migration rates to m12 & m21
phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=m12, m21=m21)
    # The population sizes then change independently in each population after ancient migration (bnu1_func, bnu2_func) and the migration rates are set to zero
bnu1_func = lambda t: nu1 * b1**(t/Ts)
bnu2_func = lambda t: nu2 * b2**(t/Ts)
phiN = dadi.Integration.two_pops(phiN, xx, Ts, bnu1_func, bnu2_func, m12=0, m21=0)
###
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
#### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population size after the split in each population and set the migration rates to me12 & me21
phiI = dadi.Integration.two_pops(phiI, xx, Tam, nu1, nu2, m12=me12, m21=me21)
# We keep the population sizes change after ancient migration to bnu1_func and bnu2_func and set the migration rates | |
# cds-snc/notifier-api
import datetime
import uuid
import pytest
from boto3.exceptions import Boto3Error
from freezegun import freeze_time
from notifications_utils.recipients import (
validate_and_format_email_address,
validate_and_format_phone_number,
)
from sqlalchemy.exc import SQLAlchemyError
from app.models import (
LETTER_TYPE,
Notification,
NotificationHistory,
ScheduledNotification,
Template,
)
from app.notifications.process_notifications import (
choose_queue,
create_content_for_notification,
db_save_and_send_notification,
persist_notification,
persist_notifications,
persist_scheduled_notification,
send_notification_to_queue,
simulated_recipient,
transform_notification,
)
from app.v2.errors import BadRequestError
from tests.app.conftest import create_sample_api_key
from tests.app.db import create_service, create_template
def test_create_content_for_notification_passes(sample_email_template):
template = Template.query.get(sample_email_template.id)
content = create_content_for_notification(template, None)
assert str(content) == template.content
def test_create_content_for_notification_with_placeholders_passes(
sample_template_with_placeholders,
):
template = Template.query.get(sample_template_with_placeholders.id)
content = create_content_for_notification(template, {"name": "Bobby"})
assert content.content == template.content
assert "Bobby" in str(content)
def test_create_content_for_notification_fails_with_missing_personalisation(
sample_template_with_placeholders,
):
template = Template.query.get(sample_template_with_placeholders.id)
with pytest.raises(BadRequestError):
create_content_for_notification(template, None)
def test_create_content_for_notification_allows_additional_personalisation(
sample_template_with_placeholders,
):
template = Template.query.get(sample_template_with_placeholders.id)
create_content_for_notification(template, {"name": "Bobby", "Additional placeholder": "Data"})
def test_persist_notification_throws_exception_when_missing_template(sample_api_key):
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
with pytest.raises(SQLAlchemyError):
persist_notification(
template_id=None,
template_version=None,
recipient="+16502532222",
service=sample_api_key.service,
personalisation=None,
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
)
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
@pytest.mark.skip(reason="Deprecated: This test needs to use the persist_notifications path")
def test_persist_notification_does_not_increment_cache_if_test_key(
notify_db, notify_db_session, sample_template, sample_job, mocker
):
api_key = create_sample_api_key(
notify_db=notify_db,
notify_db_session=notify_db_session,
service=sample_template.service,
key_type="test",
)
mocker.patch("app.notifications.process_notifications.redis_store.get", return_value="cache")
mocker.patch(
"app.notifications.process_notifications.redis_store.get_all_from_hash",
return_value="cache",
)
daily_limit_cache = mocker.patch("app.notifications.process_notifications.redis_store.incr")
template_usage_cache = mocker.patch("app.notifications.process_notifications.redis_store.increment_hash_value")
mocker.patch("app.notifications.process_notifications.dao_get_template_by_id", return_value=sample_template)
mocker.patch("app.notifications.process_notifications.choose_queue", return_value="email_queue")
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
persist_notification(
template_id=sample_template.id,
template_version=sample_template.version,
recipient="+16502532222",
service=sample_template.service,
personalisation={},
notification_type="sms",
api_key_id=api_key.id,
key_type=api_key.key_type,
job_id=sample_job.id,
job_row_number=100,
reference="ref",
)
assert Notification.query.count() == 1
assert not daily_limit_cache.called
assert not template_usage_cache.called
@pytest.mark.skip(reason="Deprecated: This test needs to use the persist_notifications path")
@freeze_time("2016-01-01 11:09:00.061258")
def test_persist_notification_with_optionals(sample_job, sample_api_key, mocker, sample_template):
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
mocked_redis = mocker.patch("app.notifications.process_notifications.redis_store.get")
mocker.patch("app.notifications.process_notifications.dao_get_template_by_id", return_value=sample_template)
mocker.patch("app.notifications.process_notifications.choose_queue", return_value="email_queue")
n_id = uuid.uuid4()
created_at = datetime.datetime(2016, 11, 11, 16, 8, 18)
persist_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient="+16502532222",
service=sample_job.service,
personalisation=None,
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
created_at=created_at,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
notification_id=n_id,
created_by_id=sample_job.created_by_id,
)
assert Notification.query.count() == 1
assert NotificationHistory.query.count() == 0
persisted_notification = Notification.query.all()[0]
assert persisted_notification.id == n_id
    assert persisted_notification.job_id == sample_job.id
assert persisted_notification.job_row_number == 10
assert persisted_notification.created_at == created_at
mocked_redis.assert_called_once_with(str(sample_job.service_id) + "-2016-01-01-count")
assert persisted_notification.client_reference == "ref from client"
assert persisted_notification.reference is None
assert persisted_notification.international is False
assert persisted_notification.phone_prefix == "1"
assert persisted_notification.rate_multiplier == 1
assert persisted_notification.created_by_id == sample_job.created_by_id
assert not persisted_notification.reply_to_text
@pytest.mark.skip(reason="Deprecated: This test needs to use the persist_notifications path")
@freeze_time("2016-01-01 11:09:00.061258")
def test_persist_notification_doesnt_touch_cache_for_old_keys_that_dont_exist(sample_template, sample_api_key, mocker):
mock_incr = mocker.patch("app.notifications.process_notifications.redis_store.incr")
mocker.patch("app.notifications.process_notifications.redis_store.get", return_value=None)
mocker.patch(
"app.notifications.process_notifications.redis_store.get_all_from_hash",
return_value=None,
)
persist_notification(
template_id=sample_template.id,
template_version=sample_template.version,
recipient="+16502532222",
service=sample_template.service,
personalisation={},
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
reference="ref",
)
mock_incr.assert_not_called()
@pytest.mark.skip(reason="Deprecated: This test needs to use the persist_notifications path")
@freeze_time("2016-01-01 11:09:00.061258")
def test_persist_notification_increments_cache_if_key_exists(sample_template, sample_api_key, mocker):
mock_incr = mocker.patch("app.notifications.process_notifications.redis_store.incr")
mocker.patch("app.notifications.process_notifications.redis_store.get", return_value=1)
mocker.patch(
"app.notifications.process_notifications.redis_store.get_all_from_hash",
return_value={sample_template.id, 1},
)
mocker.patch("app.notifications.process_notifications.dao_get_template_by_id", return_value=sample_template)
mocker.patch("app.notifications.process_notifications.choose_queue", return_value="email_queue")
persist_notification(
template_id=sample_template.id,
template_version=sample_template.version,
recipient="+16502532222",
service=sample_template.service,
personalisation={},
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
reference="ref2",
)
mock_incr.assert_called_once_with(
str(sample_template.service_id) + "-2016-01-01-count",
)
@pytest.mark.parametrize(
("research_mode, requested_queue, notification_type, key_type, reply_to_text, expected_queue, expected_task"),
[
(True, None, "sms", "normal", None, "research-mode-tasks", "deliver_sms"),
(True, None, "email", "normal", None, "research-mode-tasks", "deliver_email"),
(True, None, "email", "team", None, "research-mode-tasks", "deliver_email"),
(
True,
None,
"letter",
"normal",
None,
"research-mode-tasks",
"letters_pdf_tasks.create_letters_pdf",
),
(
True,
None,
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
"deliver_throttled_sms",
),
(False, None, "sms", "normal", None, "send-sms-tasks", "deliver_sms"),
(False, None, "email", "normal", None, "send-email-tasks", "deliver_email"),
(False, None, "sms", "team", None, "send-sms-tasks", "deliver_sms"),
(
False,
None,
"letter",
"normal",
None,
"create-letters-pdf-tasks",
"letters_pdf_tasks.create_letters_pdf",
),
(False, None, "sms", "test", None, "research-mode-tasks", "deliver_sms"),
(
False,
None,
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
"deliver_throttled_sms",
),
(
True,
"notify-internal-tasks",
"email",
"normal",
None,
"research-mode-tasks",
"deliver_email",
),
(
False,
"notify-internal-tasks",
"sms",
"normal",
None,
"notify-internal-tasks",
"deliver_sms",
),
(
False,
"notify-internal-tasks",
"email",
"normal",
None,
"notify-internal-tasks",
"deliver_email",
),
(
False,
"notify-internal-tasks",
"sms",
"test",
None,
"research-mode-tasks",
"deliver_sms",
),
(
False,
"notify-internal-tasks",
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
"deliver_throttled_sms",
),
],
)
def test_send_notification_to_queue(
notify_db,
notify_db_session,
research_mode,
requested_queue,
notification_type,
key_type,
reply_to_text,
expected_queue,
expected_task,
mocker,
):
if "." not in expected_task:
expected_task = f"provider_tasks.{expected_task}"
mocked = mocker.patch(f"app.celery.{expected_task}.apply_async")
notification = Notification(
id=uuid.uuid4(),
key_type=key_type,
notification_type=notification_type,
created_at=datetime.datetime(2016, 11, 11, 16, 8, 18),
reply_to_text=reply_to_text,
)
send_notification_to_queue(notification=notification, research_mode=research_mode, queue=requested_queue)
mocked.assert_called_once_with([str(notification.id)], queue=expected_queue)
def test_send_notification_to_queue_throws_exception_deletes_notification(sample_notification, mocker):
mocked = mocker.patch(
"app.celery.provider_tasks.deliver_sms.apply_async",
side_effect=Boto3Error("EXPECTED"),
)
with pytest.raises(Boto3Error):
send_notification_to_queue(sample_notification, False)
mocked.assert_called_once_with([(str(sample_notification.id))], queue="send-sms-tasks")
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
@pytest.mark.parametrize(
"to_address, notification_type, expected",
[
("+16132532222", "sms", True),
("+16132532223", "sms", True),
("6132532222", "sms", True),
("<EMAIL>", "email", True),
("<EMAIL>", "email", True),
("<EMAIL>", "email", True),
("6132532225", "sms", False),
("<EMAIL>", "email", False),
],
)
def test_simulated_recipient(notify_api, to_address, notification_type, expected):
"""
    The addresses and numbers for which expected is True are listed in config['SIMULATED_EMAIL_ADDRESSES']
    and config['SIMULATED_SMS_NUMBERS']. These values should result in using the research mode queue.
SIMULATED_EMAIL_ADDRESSES = (
'<EMAIL>',
'<EMAIL>',
'<EMAIL>'
)
SIMULATED_SMS_NUMBERS = ('6132532222', '+16132532222', '+16132532223')
"""
formatted_address = None
if notification_type == "email":
formatted_address = validate_and_format_email_address(to_address)
else:
formatted_address = validate_and_format_phone_number(to_address)
is_simulated_address = simulated_recipient(formatted_address, notification_type)
assert is_simulated_address == expected
@pytest.mark.parametrize(
"recipient, expected_international, expected_prefix, expected_units",
[
("6502532222", False, "1", 1), # NA
("+16502532222", False, "1", 1), # NA
("+79587714230", True, "7", 1), # Russia
("+360623400400", True, "36", 3),
], # Hungary
)
def test_persist_notification_with_international_info_stores_correct_info(
sample_job,
sample_api_key,
mocker,
recipient,
expected_international,
expected_prefix,
expected_units,
):
persist_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient=recipient,
service=sample_job.service,
personalisation=None,
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.international is expected_international
assert persisted_notification.phone_prefix == expected_prefix
assert persisted_notification.rate_multiplier == expected_units
def test_persist_notification_with_international_info_does_not_store_for_email(sample_job, sample_api_key, mocker):
persist_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient="<EMAIL>",
service=sample_job.service,
personalisation=None,
notification_type="email",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.international is False
assert persisted_notification.phone_prefix is None
assert persisted_notification.rate_multiplier is None
# This test assumes the local timezone is EST
def test_persist_scheduled_notification(sample_notification):
persist_scheduled_notification(sample_notification.id, "2017-05-12 14:15")
scheduled_notification = ScheduledNotification.query.all()
assert len(scheduled_notification) == 1
assert scheduled_notification[0].notification_id == sample_notification.id
assert scheduled_notification[0].scheduled_for == datetime.datetime(2017, 5, 12, 18, 15)
@pytest.mark.parametrize(
"recipient, expected_recipient_normalised",
[
("6502532222", "+16502532222"),
(" 6502532223", "+16502532223"),
("6502532223", "+16502532223"),
],
)
def test_persist_sms_notification_stores_normalised_number(
sample_job, sample_api_key, mocker, recipient, expected_recipient_normalised
):
persist_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient=recipient,
service=sample_job.service,
personalisation=None,
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.to == recipient
assert persisted_notification.normalised_to == expected_recipient_normalised
@pytest.mark.parametrize(
"recipient, expected_recipient_normalised",
[("<EMAIL>", "<EMAIL>"), ("<EMAIL>", "<EMAIL>")],
)
def test_persist_email_notification_stores_normalised_email(
sample_job, sample_api_key, mocker, recipient, expected_recipient_normalised
):
persist_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient=recipient,
service=sample_job.service,
personalisation=None,
notification_type="email",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.to == recipient
assert persisted_notification.normalised_to == expected_recipient_normalised
@pytest.mark.parametrize(
"postage_argument, template_postage, expected_postage",
[
("second", "first", "second"),
("first", "first", "first"),
("first", "second", "first"),
(None, "second", "second"),
],
)
def test_persist_letter_notification_finds_correct_postage(
mocker,
notify_db,
notify_db_session,
postage_argument,
template_postage,
expected_postage,
):
service = create_service(service_permissions=[LETTER_TYPE])
api_key = create_sample_api_key(notify_db, notify_db_session, service=service)
template = create_template(service, template_type=LETTER_TYPE, postage=template_postage)
mocker.patch("app.dao.templates_dao.dao_get_template_by_id", return_value=template)
persist_notification(
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient="<NAME>, 10 Downing Street, London",
service=service,
personalisation=None,
notification_type=LETTER_TYPE,
api_key_id=api_key.id,
key_type=api_key.key_type,
postage=postage_argument,
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.postage == expected_postage
def test_persist_notification_with_billable_units_stores_correct_info(mocker, notify_db_session):
service = create_service(service_permissions=[LETTER_TYPE])
template = create_template(service, template_type=LETTER_TYPE)
mocker.patch("app.dao.templates_dao.dao_get_template_by_id", return_value=template)
persist_notification(
template_id=template.id,
template_version=template.version,
recipient="123 Main Street",
service=template.service,
personalisation=None,
notification_type=template.template_type,
api_key_id=None,
key_type="normal",
billable_units=3,
template_postage=template.postage,
)
persisted_notification = Notification.query.all()[0]
assert persisted_notification.billable_units == 3
class TestPersistNotifications:
def test_persist_notifications_list(self, sample_job, sample_api_key, notify_db_session):
persist_notifications(
[
dict(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient="<EMAIL>",
service=sample_job.service,
personalisation=None,
notification_type="email",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
),
dict(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient="<EMAIL>",
service=sample_job.service,
personalisation=None,
notification_type="email",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
),
]
)
persisted_notification = Notification.query.all()
assert persisted_notification[0].to == "<EMAIL>"
assert persisted_notification[1].to == "<EMAIL>"
assert persisted_notification[0].service == sample_job.service
@pytest.mark.parametrize(
("research_mode, requested_queue, notification_type, key_type, reply_to_text, expected_queue"),
[
(True, None, "sms", "normal", None, "research-mode-tasks"),
(True, None, "email", "normal", None, "research-mode-tasks"),
(True, None, "email", "team", None, "research-mode-tasks"),
(
True,
None,
"letter",
"normal",
None,
"research-mode-tasks",
),
(
True,
None,
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
),
(False, None, "sms", "normal", None, "send-sms-tasks"),
(False, None, "email", "normal", None, "send-email-tasks"),
(False, None, "sms", "team", None, "send-sms-tasks"),
(
False,
None,
"letter",
"normal",
None,
"create-letters-pdf-tasks",
),
(False, None, "sms", "test", None, "research-mode-tasks"),
(
False,
None,
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
),
(
True,
"notify-internal-tasks",
"email",
"normal",
None,
"research-mode-tasks",
),
(
False,
"notify-internal-tasks",
"sms",
"normal",
None,
"notify-internal-tasks",
),
(
False,
"notify-internal-tasks",
"email",
"normal",
None,
"notify-internal-tasks",
),
(
False,
"notify-internal-tasks",
"sms",
"test",
None,
"research-mode-tasks",
),
(
False,
"notify-internal-tasks",
"sms",
"normal",
"+14383898585",
"send-throttled-sms-tasks",
),
],
)
def test_choose_queue(
sample_template,
sample_api_key,
sample_job,
research_mode,
requested_queue,
notification_type,
key_type,
reply_to_text,
expected_queue,
):
notification = Notification(
id=uuid.uuid4(),
template_id=sample_template.id,
template_version=sample_template.version,
service=sample_template.service,
personalisation={},
notification_type=notification_type,
api_key_id=sample_api_key.id,
key_type=key_type,
job_id=sample_job.id,
job_row_number=100,
reference="ref",
reply_to_text=reply_to_text,
to="+16502532222",
created_at=datetime.datetime(2016, 11, 11, 16, 8, 18),
)
assert choose_queue(notification, research_mode, requested_queue) == expected_queue
class TestTransformNotification:
def test_transform_notification_with_optionals(self, sample_job, sample_api_key, notify_db_session):
assert Notification.query.count() == 0
assert NotificationHistory.query.count() == 0
n_id = uuid.uuid4()
created_at = datetime.datetime(2016, 11, 11, 16, 8, 18)
notification = transform_notification(
template_id=sample_job.template.id,
template_version=sample_job.template.version,
recipient="+16502532222",
service=sample_job.service,
personalisation=None,
notification_type="sms",
api_key_id=sample_api_key.id,
key_type=sample_api_key.key_type,
created_at=created_at,
job_id=sample_job.id,
job_row_number=10,
client_reference="ref from client",
notification_id=n_id,
created_by_id=sample_job.created_by_id,
)
assert notification.id == n_id
assert notification.job_id == sample_job.id
assert notification.job_row_number == 10
assert notification.created_at == created_at
assert notification.client_reference == "ref from client"
assert notification.reference is None
assert notification.international is False
assert notification.phone_prefix == "1"
assert notification.rate_multiplier == 1
assert | |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
from copy import copy, deepcopy
import numpy as np
from scipy.optimize import newton
from HARK import AgentType, Solution, NullFunc, HARKobject, makeOnePeriodOOSolver
from HARK.utilities import warnings # Because of "patch" to warnings modules
from HARK.interpolation import(
LinearInterp, # Piecewise linear interpolation
CubicInterp, # Piecewise cubic interpolation
LinearInterpOnInterp1D, # Interpolator over 1D interpolations
BilinearInterp, # 2D interpolator
ConstantFunction, # Interpolator-like class that returns constant value
IdentityFunction # Interpolator-like class that returns one of its arguments
)
from HARK.distribution import Lognormal, MeanOneLogNormal, Uniform
from HARK.distribution import DiscreteDistribution, addDiscreteOutcomeConstantMean, combineIndepDstns
from HARK.utilities import makeGridExpMult, CRRAutility, CRRAutilityP, \
CRRAutilityPP, CRRAutilityP_inv, CRRAutility_invP, CRRAutility_inv, \
CRRAutilityP_invP
from HARK import _log
from HARK import set_verbosity_level
__all__ = ['ConsumerSolution', 'ValueFunc', 'MargValueFunc', 'MargMargValueFunc',
'ConsPerfForesightSolver', 'ConsIndShockSetup', 'ConsIndShockSolverBasic', 'PerfForesightConsumerType',
'init_perfect_foresight', 'init_lifecycle','init_cyclical']
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
# =====================================================================
# === Classes that help solve consumption-saving models ===
# =====================================================================
class ConsumerSolution(Solution):
'''
A class representing the solution of a single period of a consumption-saving
problem. The solution must include a consumption function and marginal
value function.
Here and elsewhere in the code, Nrm indicates that variables are normalized
by permanent income.
'''
distance_criteria = ['vPfunc']
def __init__(self, cFunc=None, vFunc=None,
vPfunc=None, vPPfunc=None,
mNrmMin=None, hNrm=None, MPCmin=None, MPCmax=None, HNrm=None):
'''
The constructor for a new ConsumerSolution object.
Parameters
----------
cFunc : function
The consumption function for this period, defined over market
resources and habit stocks: c = cFunc(m, H).
vFunc : function
The beginning-of-period value function for this period, defined over
market resources and habit stocks: v = vFunc(m,H).
vPfunc : function
The beginning-of-period marginal value function for this period,
defined over market resources and habit stocks: vP = vPfunc(m,H).
vPPfunc : function
The beginning-of-period marginal marginal value function for this
period, defined over market resources and habit stocks: vPP = vPPfunc(m,H).
mNrmMin : float
The minimum allowable market resources for this period; the consump-
tion function (etc) are undefined for m < mNrmMin.
hNrm : float
Human wealth after receiving income this period: PDV of all future
income, ignoring mortality.
MPCmin : float
Infimum of the marginal propensity to consume this period.
MPC --> MPCmin as m --> infinity.
MPCmax : float
Supremum of the marginal propensity to consume this period.
MPC --> MPCmax as m --> mNrmMin.
HNrm : float
The habit stock for this period, follow a law of motion
Returns
-------
None
'''
# Change any missing function inputs to NullFunc
self.cFunc = cFunc if cFunc is not None else NullFunc()
self.vFunc = vFunc if vFunc is not None else NullFunc()
self.vPfunc = vPfunc if vPfunc is not None else NullFunc()
# vPFunc = NullFunc() if vPfunc is None else vPfunc
self.vPPfunc = vPPfunc if vPPfunc is not None else NullFunc()
self.mNrmMin = mNrmMin
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
self.HNrm = HNrm
def appendSolution(self,new_solution):
'''
Appends one solution to another to create a ConsumerSolution whose
attributes are lists. Used in ConsMarkovModel, where we append solutions
*conditional* on a particular value of a Markov state to each other in
order to get the entire solution.
Parameters
----------
new_solution : ConsumerSolution
The solution to a consumption-saving problem; each attribute is a
list representing state-conditional values or functions.
Returns
-------
None
'''
if type(self.cFunc)!=list:
# Then we assume that self is an empty initialized solution instance.
# Begin by checking this is so.
assert NullFunc().distance(self.cFunc) == 0, 'appendSolution called incorrectly!'
# We will need the attributes of the solution instance to be lists. Do that here.
self.cFunc = [new_solution.cFunc]
self.vFunc = [new_solution.vFunc]
self.vPfunc = [new_solution.vPfunc]
self.vPPfunc = [new_solution.vPPfunc]
self.mNrmMin = [new_solution.mNrmMin]
            self.hNrm = [new_solution.hNrm]
else:
self.cFunc.append(new_solution.cFunc)
self.vFunc.append(new_solution.vFunc)
self.vPfunc.append(new_solution.vPfunc)
self.vPPfunc.append(new_solution.vPPfunc)
self.mNrmMin.append(new_solution.mNrmMin)
            self.hNrm.append(new_solution.hNrm)
class ValueFunc(HARKobject):
'''
A class for representing a value function. The underlying interpolation is
in the space of (m,u_inv(v)); this class "re-curves" to the value function.
'''
distance_criteria = ['func','CRRA']
def __init__(self,vFuncNvrs,CRRA):
'''
Constructor for a new value function object.
Parameters
----------
vFuncNvrs : function
A real function representing the value function composed with the
inverse utility function, defined on market resources: u_inv(vFunc(m))
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.func = deepcopy(vFuncNvrs)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose value is to
be found.
Returns
-------
v : float or np.array
Lifetime value of beginning this period with market resources m; has
same size as input m.
'''
return utility(self.func(m),gam=self.CRRA)
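# Hedged illustration (not in the original module): the "re-curving" idea behind
# ValueFunc. We interpolate u_inv(v) on a grid of market resources, wrap it in a
# ValueFunc, and recover the value level by applying CRRA utility on evaluation.
# The grid values and CRRA coefficient below are made up for demonstration.
def _valuefunc_recurve_example():
    m_grid = np.array([1.0, 2.0, 4.0])
    vNvrs_grid = np.array([0.5, 1.0, 1.8])        # u_inv(v) evaluated on m_grid
    vFuncNvrs = LinearInterp(m_grid, vNvrs_grid)  # interpolation in "inverse" space
    vFunc = ValueFunc(vFuncNvrs, CRRA=2.0)
    return vFunc(3.0)                             # == utility(vFuncNvrs(3.0), gam=2.0)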
class ValueFunc2D(HARKobject):
'''
A class for representing a value function in a model where habit stocks
is included as the second state variable. The underlying interpolation is
in the space of (mNrm,haNrm) --> u_inv(v); this class "re-curves" to the value function.
'''
distance_criteria = ['func', 'CRRA']
def __init__(self, vFuncNvrs, CRRA):
'''
Constructor for a new value function object.
Parameters
----------
vFuncNvrs : function
A real function representing the value function composed with the
inverse utility function, defined on market resources and habit
stocks: u_inv(vFunc(m,H))
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.func = deepcopy(vFuncNvrs)
self.CRRA = CRRA
def __call__(self, m, H):
'''
Evaluate the value function at given levels of normalized market resources mNrm
and normalized habit stocks haNrm.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose value is to
be found.
H : float or np.array
Habit stocks (normalized by permanent income) whose value is to
be found.
Returns
-------
v : float or np.array
Lifetime value of beginning this period with normalized market resources
m and normalized habit stock H; has same size as inputs m and H.
'''
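        # Note: Habitgamma (the curvature on the habit stock H) is assumed to be
        # attached to the instance elsewhere; it is not set in __init__ above.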
return utility((self.func(m, H)/(H**self.Habitgamma)), gam=self.CRRA)
class MargValueFunc(HARKobject):
'''
A class for representing a marginal value function in models where the
standard envelope condition of v'(m) = u'(c(m)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc','CRRA']
def __init__(self,cFunc,CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on market
resources: uP_inv(vPfunc(m)). Called cFunc because when standard
envelope condition applies, uP_inv(vPfunc(m)) = cFunc(m).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
None
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = CRRA
def __call__(self,m):
'''
Evaluate the marginal value function at given levels of market resources m.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
value is to be found.
Returns
-------
vP : float or np.array
Marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
return utilityP(self.cFunc(m),gam=self.CRRA)
def derivative(self,m):
'''
Evaluate the derivative of the marginal value function at given levels
of market resources m; this is the marginal marginal value function.
Parameters
----------
m : float or np.array
Market resources (normalized by permanent income) whose marginal
marginal value is to be found.
Returns
-------
vPP : float or np.array
Marginal marginal lifetime value of beginning this period with market
resources m; has same size as input m.
'''
c, MPC = self.cFunc.eval_with_derivative(m)
return MPC*utilityPP(c,gam=self.CRRA)
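# Hedged illustration (not in the original module): MargValueFunc applies the
# envelope condition v'(m) = u'(c(m)). With a toy linear consumption function
# (MPC = 0.5), the wrapper returns the marginal utility of c(m), and
# .derivative() returns MPC * u''(c(m)). Grid values are illustrative only.
def _margvaluefunc_envelope_example():
    cFunc_toy = LinearInterp(np.array([0.0, 10.0]), np.array([0.0, 5.0]))
    vPfunc_toy = MargValueFunc(cFunc_toy, CRRA=2.0)
    vP = vPfunc_toy(4.0)              # == utilityP(cFunc_toy(4.0), gam=2.0)
    vPP = vPfunc_toy.derivative(4.0)  # == 0.5 * utilityPP(cFunc_toy(4.0), gam=2.0)
    return vP, vPP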
class MargValueFunc2D(HARKobject):
'''
A class for representing a marginal value function in models where the
standard envelope condition of dvdm(m,H) = u'(c(m,H)) holds (with CRRA utility).
'''
distance_criteria = ['cFunc', 'CRRA']
def __init__(self, cFunc, CRRA):
'''
Constructor for a new marginal value function object.
Parameters
----------
cFunc : function
A real function representing the marginal value function composed
with the inverse marginal utility function, defined on normalized individual market
resources and normalized habit stocks: uP_inv(vPfunc(m,H)).
Called cFunc because when standard envelope condition applies,
uP_inv(vPfunc(m,H)) = cFunc(m,H).
CRRA : float
Coefficient of relative risk aversion.
Returns
-------
new instance of MargValueFunc
'''
self.cFunc = deepcopy(cFunc)
self.CRRA = | |
2]))
cases[-1]['casedir'] = 'design-48-32a'
cases.append(defaultCase(48, 3, 7, [4, 3, 2, 2, 2, 2, 2]))
cases.append(
defaultCase(48, 3, 14, [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-48-42a-t3'
cases.append(defaultCase(48, 3, 25, [
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-48-2a-t3'
if allcases:
cases.append(defaultCase(54, 3, 4, [6, 3, 3, 3]))
cases.append(defaultCase(54, 3, 7, [2, 3, 3, 3, 3, 3, 3]))
cases.append(defaultCase(54, 3, 6, [3, 3, 3, 3, 3, 3]))
cases.append(defaultCase(56, 3, 5, [14, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-56-142a-t3'
cases.append(defaultCase(56, 3, 7, [7, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(56, 3, 32, [
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-56-2a-t3'
cases.append(defaultCase(60, 3, 4, [5, 3, 2, 2]))
cases.append(defaultCase(64, 3, 5, [16, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-64-162a-t3'
cases.append(defaultCase(64, 3, 4, [8, 4, 2, 2]))
cases.append(defaultCase(64, 3, 9, [8, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-64-82'
cases.append(defaultCase(64, 3, 7, [4, 4, 4, 4, 4, 4, 4]))
cases.append(defaultCase(64, 3, 8, [4, 4, 4, 4, 4, 4, 2, 2]))
if allcases:
cases.append(defaultCase(64, 3, 9, [4, 4, 4, 4, 4, 2, 2, 2, 2]))
cases.append(defaultCase(64, 3, 11, [4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2]))
cases.append(
defaultCase(64, 3, 13, [4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases.append(
defaultCase(64, 3, 18, [4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-64-442a-t3'
cases.append(
defaultCase(64, 3, 17, [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-64-42a-t3'
cases.append(defaultCase(64, 3, 35, [
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-64-2a-t3'
if allcases:
cases.append(defaultCase(72, 3, 4, [4, 3, 3, 2]))
cases.append(
defaultCase(72, 3, 15, [3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-72-332a-t3'
cases.append(defaultCase(72, 3, 9, [9, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-72-92a-t3'
if allcases:
cases.append(defaultCase(72, 3, 5, [6, 6, 2, 2, 2]))
cases.append(defaultCase(72, 3, 9, [6, 3, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(80, 3, 10, [5, 4, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-80-542a-t3'
cases.append(defaultCase(80, 3, 10, [2, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(81, 3, 6, [9, 3, 3, 3, 3, 3]))
cases[-1]['casedir'] = 'design-81-93a-t3'
cases.append(defaultCase(81, 3, 12, [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]))
cases[-1]['casedir'] = 'design-81-3a-t3'
if allcases:
cases.append(defaultCase(84, 3, 4, [7, 3, 2, 2]))
cases.append(defaultCase(88, 3, 14, [2]))
cases.append(defaultCase(90, 3, 4, [5, 3, 3, 2]))
cases.append(defaultCase(96, 3, 14, [2]))
cases.append(defaultCase(96, 3, 14, [3, 2]))
cases.append(defaultCase(100, 3, 4, [5, 5, 2, 2]))
cases.append(defaultCase(104, 3, 14, [2]))
cases.append(defaultCase(108, 3, 4, [3, 3, 2, 2]))
cases[-1]['casedir'] = 'design-108-3322-t3'
cases.append(defaultCase(108, 3, 5, [3, 3, 3, 2, 2]))
cases[-1]['casedir'] = 'design-108-33322-t3'
cases.append(defaultCase(108, 3, 4, [6, 3, 3, 2]))
cases[-1]['casedir'] = 'design-108-6332-t3'
cases.append(defaultCase(112, 3, 4, [7, 4, 2, 2]))
cases[-1]['casedir'] = 'design-112-7422-t3'
cases.append(defaultCase(120, 3, 4, [5, 4, 3, 2]))
cases[-1]['casedir'] = 'design-120-5432-t3'
cases.append(defaultCase(120, 3, 4, [6, 5, 2, 2]))
cases[-1]['casedir'] = 'design-120-6522-t3'
cases.append(defaultCase(150, 3, 4, [5, 5, 3, 2]))
cases[-1]['casedir'] = 'design-150-5532a-t3'
cases.append(defaultCase(176, 3, 14, [2]))
cases.append(defaultCase(16, 4, 6, [2, 2, 2, 2, 2, 2]))
if allcases:
cases.append(defaultCase(32, 4, 6, [4, 2, 2, 2, 2, 2]))
cases.append(defaultCase(32, 4, 7, [2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(48, 4, 5, [6, 2, 2, 2, 2]))
cases.append(defaultCase(48, 4, 7, [3, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(48, 4, 6, [2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(64, 4, 5, [8, 2, 2, 2, 2]))
cases.append(defaultCase(64, 4, 5, [4, 4, 2, 2, 2]))
cases.append(defaultCase(64, 4, 8, [4, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(64, 4, 9, [2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(72, 4, 5, [3, 3, 2, 2, 2]))
cases.append(defaultCase(80, 4, 6, [5, 2, 2, 2, 2, 2]))
if allcases:
cases.append(defaultCase(80, 4, 6, [2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(81, 4, 5, [3, 3, 3, 3, 3]))
cases.append(defaultCase(96, 4, 6, [6, 4, 2, 2, 2, 2]))
cases.append(defaultCase(96, 4, 7, [6, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(96, 4, 6, [4, 3, 2, 2, 2, 2]))
cases.append(defaultCase(96, 4, 8, [4, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(96, 4, 9, [3, 2, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(96, 4, 8, [2, 2, 2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(108, 4, 5, [3, 3, 3, 2, 2]))
cases.append(defaultCase(112, 4, 6, [7, 2, 2, 2, 2, 2]))
cases.append(defaultCase(112, 4, 6, [2, 2, 2, 2, 2, 2]))
cases.append(defaultCase(120, 4, 5, [5, 3, 2, 2, 2]))
cases.append(defaultCase(128, 4, 6, [16, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-128-162a-t4'
cases.append(defaultCase(128, 4, 12, [8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-128-82a-t4'
cases.append(defaultCase(128, 4, 12, [8, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-128-842a'
cases.append(defaultCase(128, 4, 8, [4, 4, 4, 2, 2, 2, 2, 2]))
cases.append(defaultCase(128, 4, 9, [4, 4, 2, 2, 2, 2, 2, 2, 2]))
cases.append(
defaultCase(128, 4, 16, [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-128-42a'
cases.append(
defaultCase(128, 4, 18, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-128-2a'
cases.append(defaultCase(144, 4, 11, [9, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-144-92a'
cases.append(defaultCase(144, 4, 11, [6, 6, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-144-662a'
if allcases:
cases.append(defaultCase(144, 4, 6, [6, 2, 2, 2, 2, 2]))
cases.append(defaultCase(144, 4, 6, [6, 3, 2, 2, 2, 2]))
cases.append(defaultCase(144, 4, 5, [4, 3, 3, 2, 2]))
cases.append(defaultCase(144, 4, 10, [3, 3, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-144-332a-t4'
cases.append(defaultCase(144, 4, 8, [3, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-144-32a'
cases.append(
defaultCase(144, 4, 12, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-144-2a'
cases.append(defaultCase(160, 4, 7, [10, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-160-102a-t4'
cases.append(defaultCase(160, 4, 8, [5, 4, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-160-542a-t4'
cases.append(
defaultCase(160, 4, 11, [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-160-42a-t4'
cases.append(defaultCase(160, 4, 10, [5, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-160-52a-t4'
cases.append(defaultCase(160, 4, 11, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-160-2a-t4'
if allcases:
cases.append(defaultCase(162, 4, 6, [3, 3, 3, 3, 3, 3]))
cases.append(defaultCase(162, 4, 6, [3, 3, 3, 3, 3, 2]))
cases.append(defaultCase(162, 4, 5, [3, 3, 3, 3, 2]))
cases.append(defaultCase(168, 4, 5, [7, 3, 2, 2, 2]))
cases.append(defaultCase(176, 4, 14, [2]))
cases.append(defaultCase(180, 4, 5, [5, 3, 3, 2, 2]))
cases.append(defaultCase(200, 4, 5, [5, 5, 2, 2, 2]))
cases.append(defaultCase(216, 4, 5, [4, 3, 3, 3, 2]))
cases.append(defaultCase(216, 4, 5, [6, 3, 3, 2, 2]))
cases.append(defaultCase(252, 4, 5, [7, 3, 3, 2, 2]))
cases.append(defaultCase(256, 4, 6, [4, 4, 4, 4, 4, 4]))
cases[-1]['casedir'] = 'design-256-4a-t4'
cases.append(defaultCase(216, 4, 6, [3, 3, 3, 2, 2, 2]))
cases[-1]['casedir'] = 'design-216-3332a-t4'
cases.append(
defaultCase(256, 4, 14, [4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2]))
cases[-1]['casedir'] = 'design-256-444442a-t4'
# strength 5 and higher
# cases.append(defaultCase(16, 4, 5, [ 2,2,2,2,2 ]))
cases.append(defaultCase(32, 5, 7, [2, 2, 2, 2, 2, 2, 2]))
    cases.append(defaultCase(64, 5, 8, [2, 2,
import tensorflow as tf
import numpy as np
import datetime
import pickle
from music_parser import *
def get_time():
return datetime.datetime.now().strftime("%m-%d--%H-%M")
def get_batch(songs_parsed, one_hot_length, num_keys, batch_size, num_timesteps):
# create test data
# loop = np.array([
# [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0],
# [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
# [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
# [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],
# [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0],
# [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
# [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
# [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0]
# ])
# loop_10 = np.concatenate([loop for x in range(10)], axis=0)
# create lists to hold data, 0-dimension is batch
X_batch = []
y_batch = []
# iterate for batch size
for x in range(batch_size):
song = songs_parsed[np.random.randint(0, len(songs_parsed))]
# pick random starting index
random_starting_index = np.random.randint(0, len(song) - num_timesteps)
# y data is X shifted forward 1 timestep
X = song[random_starting_index:random_starting_index + num_timesteps]
y = song[random_starting_index + 1:random_starting_index + num_timesteps + 1]
# append batch list
X_batch.append(X)
y_batch.append(y)
X_batch = np.array(X_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys)
y_batch = np.array(y_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys)
return X_batch, y_batch
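# Illustrative call (hypothetical values, assuming each parsed song is an array of
# shape (timesteps, one_hot_length * num_keys), e.g. 7 * 60 = 420 columns):
#   X, y = get_batch(songs_parsed, one_hot_length=7, num_keys=60,
#                    batch_size=5, num_timesteps=64)
#   # X.shape == (5, 64, 420); y is the same window shifted forward by one timestep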
### WIP function for adding time indices to data
# def get_batch_with_time_index(songs_parsed, time_index_dict, time_index, one_hot_length, num_keys, batch_size, num_timesteps):
# # create lists to hold data, 0-dimension is batch
# X_batch = []
# y_batch = []
#
# time_index_dict_length = len(time_index_dict)
#
# # iterate for batch size
# for x in range(batch_size):
# song = songs_parsed[np.random.randint(0, len(songs_parsed))]
#
# # pick random starting index
# random_starting_index = np.random.randint(0, len(song) - num_timesteps)
#
# X_and_y = song[random_starting_index:random_starting_index + num_timesteps + 1]
#
# # y data is X shifted forward 1 timestep
# # X = song[random_starting_index:random_starting_index + num_timesteps]
# for Xy_time_index in range(len(X_and_y)):
# X_and_y[time_index] = np.append(X_and_y[time_index], time_index_dict[(Xy_time_index + time_index) % time_index_dict_length])
#
# X = X_and_y[:-1]
# y = X_and_y[1:]
#
# # append batch list
# X_batch.append(X)
# y_batch.append(y)
#
# # add the time index vectors if necessary
# if time_index_dict != {}:
# X_batch = np.array(X_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys + time_index_dict_length)
# y_batch = np.array(y_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys + time_index_dict_length)
# else:
# X_batch = np.array(X_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys)
# y_batch = np.array(y_batch).reshape(batch_size, num_timesteps, one_hot_length*num_keys)
#
# return X_batch, y_batch
def build_graph(ONEHOT_LENGTH, NUM_TIMESTEPS, NUM_NOTES, LEARNING_RATE, NETWORK_LAYERS):
    # NOTE: the variables initializer is created at the end of this function, after all
    # graph variables exist; creating it here, before any variables, would be a no-op.
# input for X and y data
notes_in_placeholder = tf.placeholder(tf.float32, [None, NUM_TIMESTEPS, NUM_NOTES * ONEHOT_LENGTH])
notes_out_placeholder = tf.placeholder(tf.float32, [None, NUM_TIMESTEPS, NUM_NOTES * ONEHOT_LENGTH])
cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n, activation=tf.nn.tanh) for n in NETWORK_LAYERS]
stacked_rnn_cell = tf.contrib.rnn.MultiRNNCell(cells)
# wrap stacked lstm into dynamic_rnn wrapper
lstm_output, lstm_states = tf.nn.dynamic_rnn(stacked_rnn_cell, notes_in_placeholder, dtype=tf.float32)
# print lstm output
# lstm_output_p = tf.Print(lstm_output, [lstm_output], summarize=100)
# split_one_hots = tf.split(input_placeholder, one_hot_template, -1)
split_one_hots = tf.split(lstm_output, NUM_NOTES, -1)
# split_one_hots_p = tf.Print(split_one_hots, [split_one_hots], summarize=100)
# stack em up
split_one_hots_as_list = [split_one_hots[index] for index in range(NUM_NOTES)]
stacked_one_hots = tf.stack(split_one_hots_as_list, axis=-2)
# stacked_one_hots_p = tf.Print(stacked_one_hots, [stacked_one_hots], summarize=100)
# split and stack the y data to match stacked_one_hots
notes_out_tensor = tf.convert_to_tensor(notes_out_placeholder)
notes_out_split = tf.split(notes_out_tensor, NUM_NOTES, -1)
notes_out_stacked = tf.stack(notes_out_split, axis=-2)
# softmax with cross entropy is used for calculating loss during training
softmax_notes_with_CE = tf.nn.softmax_cross_entropy_with_logits(labels=notes_out_stacked, logits=stacked_one_hots)
    # softmax output is used during generation/sampling
softmax_notes_output = tf.nn.softmax(stacked_one_hots)
# average output of softmax with cross entropy, optimize with Adam
loss = tf.reduce_mean(softmax_notes_with_CE)
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
train = optimizer.minimize(loss)
    # build the initializer only after every variable in the graph has been defined
    init = tf.global_variables_initializer()
    return init, train, loss, notes_in_placeholder, notes_out_placeholder, softmax_notes_output
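# Shape walk-through of build_graph (illustrative, using the hyper parameters set in
# train() below: ONEHOT_LENGTH=7, NUM_NOTES=60, NUM_TIMESTEPS=64):
#   notes_in_placeholder : (batch, 64, 7*60=420)
#   lstm_output          : (batch, 64, 420)      # last LSTM layer has 7*60 units
#   split_one_hots       : 60 tensors of shape (batch, 64, 7)
#   stacked_one_hots     : (batch, 64, 60, 7)    # softmax / cross entropy act on the last axis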
# training with real parsed music data
def train(PARSED_SONGS_PKL, MODEL_SAVE_DIR):
# network hyper parameters
NUM_NOTES = 60
ONEHOT_LENGTH = 7
NUM_TIMESTEPS = 64
BATCH_SIZE = 5
LEARNING_RATE = 0.001
NETWORK_LAYERS = [200, 200, ONEHOT_LENGTH*NUM_NOTES]
NUM_SAMPLES_TO_TRAIN = 10*1000000
SAVE_EVERY = 50000
### Needed if adding time index vectors
# if time_index_length != 0:
# # creates dictionary of one-hot vectors:
# # 0: [1, 0, 0, ...]
# # 1: [0, 1, 0, ...]
# # 2: [0, 0, 1, ...]
# time_index_dict = {i:[(0 if j != i else 1) for j in range(time_index_length)] for i in range(time_index_length)}
# else:
# time_index_dict = {}
# load parsed songs
with open(PARSED_SONGS_PKL, 'rb') as f:
songs_parsed = pickle.load(f)
# build graph
init, train, loss, notes_in_placeholder, notes_out_placeholder, softmax_notes_output = \
build_graph(ONEHOT_LENGTH, NUM_TIMESTEPS, NUM_NOTES, LEARNING_RATE, NETWORK_LAYERS)
# for saving the model
saver = tf.train.Saver(max_to_keep=100)
# start session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print("Starting training at: " + get_time())
# create loss summary
log_filename = "../logs/test_more_bigger_layers_" + get_time()
print("saving loss to: " + log_filename)
writer = tf.summary.FileWriter(log_filename, sess.graph)
loss_summary = tf.summary.scalar('Loss', loss)
samples_trained = 0
while samples_trained < NUM_SAMPLES_TO_TRAIN:
# begin training routine
if (samples_trained % (SAVE_EVERY/10)) < BATCH_SIZE:
print("Trained " + str(samples_trained) + " samples: " + get_time())
X, y = get_batch(songs_parsed, ONEHOT_LENGTH, NUM_NOTES, BATCH_SIZE, NUM_TIMESTEPS)
# run graph and get batch loss
batch_loss, note_predictions, _ = sess.run([loss_summary, softmax_notes_output, train],
feed_dict={notes_in_placeholder: X, notes_out_placeholder: y})
# add summary to tensorboard logs
writer.add_summary(batch_loss, samples_trained)
# save model at increments
if samples_trained % SAVE_EVERY == 0:
variables_save_file = MODEL_SAVE_DIR + "test_more_bigger_layers_" + get_time()
print("saving model to: " + variables_save_file)
saver.save(sess, variables_save_file)
samples_trained += BATCH_SIZE
writer.close()
def generate(model_path_and_name, TEMP=0.08):
# network hyper parameters
NUM_NOTES = 60
ONEHOT_LENGTH = 7
NUM_TIMESTEPS = 64
BATCH_SIZE = 5
LEARNING_RATE = 0.001
NETWORK_LAYERS = [200, 200, ONEHOT_LENGTH*NUM_NOTES]
TIMESTEPS_TO_SAMPLE = 250
# load parsed songs to get some starter
PARSED_SONGS_PKL = "../data/7songs.pkl"
with open(PARSED_SONGS_PKL, 'rb') as f:
songs_parsed = pickle.load(f)
# build graph
init, train, loss, notes_in_placeholder, notes_out_placeholder, softmax_notes_output = \
build_graph(ONEHOT_LENGTH, NUM_TIMESTEPS, NUM_NOTES, LEARNING_RATE, NETWORK_LAYERS)
# for loading the model
saver = tf.train.Saver(max_to_keep=100)
# start session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# restore our model
saver.restore(sess, model_path_and_name)
parser = MusicParser()
# use the beginning of a song as the starter
X, y = get_batch(songs_parsed, ONEHOT_LENGTH, NUM_NOTES, BATCH_SIZE, NUM_TIMESTEPS)
# the written_song variable will hold the starting notes and predicted notes
written_song = X[0]
timesteps_sampled = 0
while timesteps_sampled < TIMESTEPS_TO_SAMPLE:
timesteps_sampled += 1
feed_in = written_song[-NUM_TIMESTEPS:, :]
# run graph and get batch loss
note_predictions = sess.run([softmax_notes_output],
feed_dict={notes_in_placeholder: [feed_in]})
# grab last timestep
last_timestep = note_predictions[0][0][-1][:].reshape(-1)
# sample from timestep
for index_divided in range(int(len(last_timestep) / 7)):
starting_index = index_divided * 7
ending_index = starting_index + 7
distribution = last_timestep[starting_index:ending_index]
chosen_index = sample_note_from_softmax(distribution, temp=TEMP) + starting_index
for index in range(starting_index, ending_index):
if index == chosen_index:
last_timestep[index] = 1
else:
last_timestep[index] = 0
# append sampled last timestep
written_song = np.append(written_song, last_timestep).reshape(-1, ONEHOT_LENGTH*NUM_NOTES)
# time_series_to_midi(self, time_series, min_note, filepath):
parser.time_series_to_midi(written_song[:-1, :], 30, "../gen")
def sample_note_from_softmax(distribution, temp=0):
if temp != 0:
exp_sum = sum([np.exp(i/temp) for i in distribution])
distribution = [np.exp(i/temp)/exp_sum for i in distribution]
random_num = np.random.uniform(0, 1, 1)
curr_index = 0
curr_sum = distribution[curr_index]
while curr_sum < random_num:
curr_index += 1
curr_sum += distribution[curr_index]
return curr_index
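# Illustrative behaviour (hypothetical distribution [0.7, 0.2, 0.1]):
#   temp=0    -> sample directly from the softmax probabilities as given
#   temp=0.08 -> probabilities are re-sharpened via exp(p / temp), so index 0 wins
#                almost every time; a larger temp flattens the distribution instead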
# train(PARSED_SONGS_PKL="../data/50songs.pkl", MODEL_SAVE_DIR="../test_more_bigger_layers/")
generate("../test_more_bigger_layers/test_more_bigger_layers_12-21--08-35", TEMP=0.08)
# MusicParser.midis_to_time_series_pickle("../50_midis/", "../data/50songs")
"""
TO DO:
- re-rewrite sample function to actually sample
- sample before feeding back in
- get good starting notes: just used part of another song
- clean up code & push (why are there 2 "training" functions?)
- it doesn't know where it is in the bar...
what if we attached a "counting" one-hot vector at the end of note
(UPDATE: this is a huge pain with tf.stack b/c you must stack [7, 7, 7, 16] which is illegal)
- search for hyperparameters: num layers, num nodes, sample size / song choices
12/17/18
- where were we?
- most recent model: "../fifty_songs_models/data_set_search_10-04--21-42"
- we were screwing with hyper parameters, like training set, num timesteps,
- we found:
- more songs the merrier (we had just compiled 50)
- 64 timesteps looked good
- idk if we messed with num layers and num nodes, lets test that:
- currently: [7*60, 100, 7*60]
- more layers: [7*60, 100, 100, 7*60]
- more nodes: [7*60, 200, 7*60]
- we were setting the parameter to 10M, but it was mostly finished at 1M: set to 2M hard this time (aint nobody got time for that)
- train on dataset of 50 songs
# NitroFE/time_based_features/moving_average_features/moving_average_features.py
import pandas as pd
import numpy as np
from typing import Union, Callable
from NitroFE.time_based_features.weighted_window_features.weighted_window_features import (
weighted_window_features,
)
from NitroFE.time_based_features.weighted_window_features.weighted_windows import (
_equal_window,
_identity_window,
)
class ExponentialMovingFeature:
"""
Provided dataframe must be in ascending order.
"""
def __init__(
self,
alpha: float = None,
operation: str = "mean",
initialize_using_operation: bool = False,
initialize_span: int = None,
com: float = None,
span: int = None,
halflife: float = None,
min_periods: int = 0,
ignore_na: bool = False,
axis: int = 0,
times: str = None,
):
"""
Parameters
----------
alpha : float, optional
Specify smoothing factor directly, by default None
operation : str, {'mean','var','std'}
            operation to be performed for the moving feature; available operations are 'mean', 'var', 'std', by default 'mean'
initialize_using_operation : bool, optional
If True, then specified 'operation' is performed on the first 'initialize_span' values, and then the exponential moving average is calculated, by default False
initialize_span : int, optional
the span over which 'operation' would be performed for initialization, by default None
com : float, optional
Specify decay in terms of center of mass, by default None
span : float, optional
            specify decay in terms of span, by default None
halflife : float, optional
Specify decay in terms of half-life, by default None
min_periods : int, optional
Minimum number of observations in window required to have a value (otherwise result is NA), by default 0
ignore_na : bool, optional
Ignore missing values when calculating weights; specify True to reproduce pre-0.15.0 behavior, by default False
axis : int, optional
The axis to use. The value 0 identifies the rows, and 1 identifies the columns, by default 0
times : str, optional
Times corresponding to the observations. Must be monotonically increasing and datetime64[ns] dtype, by default None
"""
self.com = com
self.span = span
self.halflife = halflife
self.alpha = alpha
        self.min_periods = min_periods if min_periods is not None else 0
self.adjust = False
self.ignore_na = ignore_na
self.axis = axis
self.times = times
self.operation = operation
self.last_values_from_previous_run = None
self.initialize_using_operation = initialize_using_operation
self.initialize_span = initialize_span
def _perform_temp_operation(self, x):
if self.operation == "mean":
_return = x.mean()
elif self.operation == "var":
_return = x.var()
elif self.operation == "std":
_return = x.std()
else:
raise ValueError(f"Operation {self.operation} not supported")
return _return
def fit(self, dataframe: Union[pd.DataFrame, pd.Series], first_fit: bool = True):
"""
        For your training/initial fit phase (very first fit) use first_fit=True, and for any production/test implementation pass first_fit=False
Parameters
----------
dataframe : Union[pd.DataFrame, pd.Series]
dataframe containing column values to create exponential moving feature over
first_fit : bool, optional
Moving features require past values for calculation.
Use True, when calculating for training data (very first fit)
Use False, when calculating for subsequent testing/production data { in which case the values, which
were saved during the last phase, will be utilized for calculation }, by default True
"""
if not first_fit:
if self.last_values_from_previous_run is None:
                raise ValueError(
                    "First fit has not occurred before. Kindly run first_fit=True for the first fit instance, "
                    "and then proceed with first_fit=False for subsequent fits "
)
self.adjust = False
dataframe = pd.concat(
[self.last_values_from_previous_run, dataframe], axis=0
)
else:
if self.initialize_using_operation:
self.min_periods = 0
if (self.initialize_span is None) and (self.span is None):
raise ValueError(
"For initialize_using_operation=True,"
"either initialize_span or span value is required"
)
elif (self.initialize_span is None) and (self.span is not None):
self.initialize_span = self.span
first_frame = self._perform_temp_operation(
dataframe[: self.initialize_span].rolling(
window=self.initialize_span
)
)
dataframe = pd.concat([first_frame, dataframe[self.initialize_span :]])
else:
if self.initialize_span is not None:
raise ValueError(
"In order to use initialize_span, initialize_using_operation must be True"
)
_dataframe = dataframe.ewm(
com=self.com,
span=self.span,
halflife=self.halflife,
alpha=self.alpha,
min_periods=self.min_periods,
adjust=self.adjust,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
)
_return = self._perform_temp_operation(_dataframe)
if not first_fit:
_return = _return.iloc[1:]
self.last_values_from_previous_run = _return.iloc[-1:]
return _return
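    # Minimal usage sketch (hypothetical series names; train_s / test_s are numeric pandas Series):
    #   emf = ExponentialMovingFeature(span=4, operation='mean')
    #   train_feature = emf.fit(train_s, first_fit=True)   # training / very first fit
    #   test_feature = emf.fit(test_s, first_fit=False)    # continues from the saved last value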
class HullMovingFeature:
"""
Provided dataframe must be in ascending order.
"""
def __init__(
self, window: int = 4, min_periods: int = 1, operation: Callable = None
):
"""
Parameters
----------
window : int, optional
            Size of the rolling window, by default 4
min_periods : int, optional
Minimum number of observations in window required to have a value, by default 1
operation : Callable, optional
            operation to perform over the weighted rolling window values; when None is passed, np.mean is used
"""
self.window = window
self.min_periods = min_periods
        operation = np.mean if operation is None else operation
self.operation = operation
if self.window <= 1:
            raise ValueError("window size less than or equal to 1 is not supported")
self.window_by_two, self.window_square_root = int(
np.ceil(self.window / 2)
), int(np.ceil(np.sqrt(self.window)))
def fit(self, dataframe: Union[pd.DataFrame, pd.Series], first_fit: bool = True):
"""
        For your training/initial fit phase (very first fit) use first_fit=True, and for any production/test implementation pass first_fit=False
Parameters
----------
dataframe : Union[pd.DataFrame,pd.Series]
dataframe/series over which feature is to be constructed
first_fit : bool, optional
Moving features require past values for calculation.
Use True, when calculating for training data (very first fit)
Use False, when calculating for any subsequent testing/production data { in which case the values, which
were saved during the last phase, will be utilized for calculation }, by default True
"""
if first_fit:
self._window_size_weighted_moving_average_object = (
weighted_window_features()
)
self._window_by_two_size_weighted_moving_average_object = (
weighted_window_features()
)
self._hma_object = weighted_window_features()
window_size_weighted_moving_average = self._window_size_weighted_moving_average_object.caluclate_weighted_moving_window_feature(
dataframe=dataframe,
first_fit=first_fit,
window=self.window,
min_periods=self.min_periods,
operation=self.operation,
)
window_by_two_size_weighted_moving_average = self._window_by_two_size_weighted_moving_average_object.caluclate_weighted_moving_window_feature(
dataframe=dataframe,
first_fit=first_fit,
window=self.window_by_two,
min_periods=self.min_periods,
operation=self.operation,
)
raw_hma = (
2 * window_by_two_size_weighted_moving_average
- window_size_weighted_moving_average
)
hma = self._hma_object.caluclate_weighted_moving_window_feature(
dataframe=raw_hma,
first_fit=first_fit,
window=self.window_square_root,
min_periods=self.min_periods,
operation=self.operation,
)
return hma
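    # For reference, the three weighted windows above follow the usual Hull moving
    # average recipe (a sketch, where WMA denotes the weighted moving window feature):
    #   HMA(n) = WMA( 2 * WMA(n / 2) - WMA(n), sqrt(n) )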
class _a_kaufman_efficiency:
def __init__(self):
pass
def _calculate_kaufman_efficiency(self, x):
up = np.abs(x.iloc[-1] - x.iloc[0])
down = np.abs(x.diff().fillna(0)).sum()
if down == 0:
return 0
else:
return up / down
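    # Worked example (hypothetical values): for x = [1, 2, 3, 2]
    #   up   = |2 - 1| = 1
    #   down = |1| + |1| + |-1| = 3
    #   efficiency ratio = 1 / 3 ≈ 0.33 (a value of 1.0 means a perfectly trending series)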
def fit(
self,
dataframe: Union[pd.DataFrame, pd.Series],
first_fit: bool = True,
lookback_period: int = 4,
min_periods: int = None,
):
if first_fit:
self._kaufman_efficiency_object = weighted_window_features()
self.lookback_period = lookback_period
self.min_periods = min_periods
_kaufman_efficiency = (
self._kaufman_efficiency_object._template_feature_calculation(
function_name="kaufman_efficiency",
win_function=_identity_window,
first_fit=first_fit,
dataframe=dataframe,
window=self.lookback_period,
min_periods=self.min_periods,
symmetric=None,
operation=self._calculate_kaufman_efficiency,
operation_args=(),
)
)
return _kaufman_efficiency
class KaufmanAdaptiveMovingAverage:
"""
Provided dataframe must be in ascending order.
"""
def __init__(
self,
kaufman_efficiency_lookback_period: int = 4,
kaufman_efficiency_min_periods: int = None,
fast_ema_span: int = 2,
slow_ema_span: int = 5,
):
"""
Parameters
----------
kaufman_efficiency_lookback_period : int, optional
            Size of the rolling window of lookback for the calculation of the Kaufman efficiency ratio, by default 4
kaufman_efficiency_min_periods : int, optional
            Minimum number of observations in window required to have a value for the Kaufman efficiency ratio, by default None
fast_ema_span : int, optional
fast span length, by default 2
slow_ema_span : int, optional
slow span length, by default 5
"""
self.kaufman_efficiency_lookback_period = kaufman_efficiency_lookback_period
self.kaufman_efficiency_min_periods = kaufman_efficiency_min_periods
self.fast_ema_span = fast_ema_span
self.slow_ema_span = slow_ema_span
def fit(self, dataframe: Union[pd.DataFrame, pd.Series], first_fit: bool = True):
"""
        For your training/initial fit phase (very first fit) use first_fit=True, and for any production/test implementation pass first_fit=False
Parameters
----------
dataframe : Union[pd.DataFrame, pd.Series]
dataframe containing column values to create feature over
first_fit : bool, optional
Moving features require past values for calculation.
Use True, when calculating for training data (very first fit)
Use False, when calculating for subsequent testing/production data { in which case the values, which
were saved during the last phase, will be utilized for calculation }, by default True
"""
if first_fit:
self._kaufman_object = _a_kaufman_efficiency()
if isinstance(dataframe, pd.Series):
dataframe = dataframe.to_frame()
kma = pd.DataFrame(
np.zeros(dataframe.shape), columns=dataframe.columns, index=dataframe.index
)
if first_fit:
kma = pd.concat(
[pd.DataFrame(np.zeros((1, kma.shape[1])), columns=kma.columns), kma]
)
            if self.kaufman_efficiency_min_periods is None:
_first_pervious = self.kaufman_efficiency_lookback_period - 2
elif self.kaufman_efficiency_min_periods > 1:
_first_pervious = self.kaufman_efficiency_min_periods - 2
else:
_first_pervious = 0
else:
kma = pd.concat([self.values_from_last_run, kma])
_first_pervious = -1
kma["_iloc"] = np.arange(len(kma))
_kaufman_efficiency = self._kaufman_object.fit(
dataframe=dataframe,
first_fit=first_fit,
lookback_period=self.kaufman_efficiency_lookback_period,
min_periods=self.kaufman_efficiency_min_periods,
)
ll = [x for x in kma.columns if x != "_iloc"]
SC = (
_kaufman_efficiency.copy()
* (2 / (self.fast_ema_span + 1) - 2 / (self.slow_ema_span + 1))
+ 2 / (self.slow_ema_span + 1)
) ** 2
if first_fit:
SC.iloc[_first_pervious] = [0] * SC.shape[1]
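        # The loop below applies the usual KAMA recurrence row by row (a sketch of the
        # standard definition, with price_t the current observation):
        #   KAMA_t = KAMA_{t-1} + SC_t * (price_t - KAMA_{t-1})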
for r1, r2, r3 in zip(
dataframe[(_first_pervious + 1) :].iterrows(),
kma[(1 + _first_pervious + 1) :].iterrows(),
SC[(_first_pervious + 1) :].iterrows(),
):
previous_kama = kma[kma["_iloc"] == (r2[1]["_iloc"] - 1)][ll]
kma.loc[kma["_iloc"] == r2[1]["_iloc"], ll] = (
previous_kama
                    +
#!/usr/bin/env python3
""" Base class for Face Detector plugins
Plugins should inherit from this class
See the override methods for which methods are
required.
For each source frame, the plugin must pass a dict to finalize containing:
{"filename": <filename of source frame>,
"image": <source image>,
"detected_faces": <list of BoundingBoxes>} (Class defined in /lib/faces_detect)
"""
import logging
import os
import traceback
from io import StringIO
import cv2
from lib.faces_detect import BoundingBox
from lib.gpu_stats import GPUStats
from lib.utils import rotate_landmarks, GetModel
from plugins.extract._config import Config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_config(plugin_name):
""" Return the config for the requested model """
return Config(plugin_name).config_dict
class Detector():
""" Detector object """
def __init__(self, loglevel,
git_model_id=None, model_filename=None, rotation=None, min_size=0):
logger.debug("Initializing %s: (loglevel: %s, git_model_id: %s, model_filename: %s, "
"rotation: %s, min_size: %s)", self.__class__.__name__, loglevel,
git_model_id, model_filename, rotation, min_size)
self.config = get_config(".".join(self.__module__.split(".")[-2:]))
self.loglevel = loglevel
self.rotation = self.get_rotation_angles(rotation)
self.min_size = min_size
self.parent_is_pool = False
self.init = None
self.error = None
# The input and output queues for the plugin.
# See lib.queue_manager.QueueManager for getting queues
self.queues = {"in": None, "out": None}
# Path to model if required
self.model_path = self.get_model(git_model_id, model_filename)
# Target image size for passing images through the detector
# Set to tuple of dimensions (x, y) or int of pixel count
self.target = None
# Approximate VRAM used for the set target. Used to calculate
# how many parallel processes / batches can be run.
# Be conservative to avoid OOM.
self.vram = None
# For detectors that support batching, this should be set to
# the calculated batch size that the amount of available VRAM
# will support. It is also used for holding the number of threads/
# processes for parallel processing plugins
self.batch_size = 1
logger.debug("Initialized _base %s", self.__class__.__name__)
# <<< OVERRIDE METHODS >>> #
    def initialize(self, *args, **kwargs):
        """ Initialize the detector
Tasks to be run before any detection is performed.
Override for specific detector """
logger.debug("initialize %s (PID: %s, args: %s, kwargs: %s)",
self.__class__.__name__, os.getpid(), args, kwargs)
self.init = kwargs.get("event", False)
self.error = kwargs.get("error", False)
self.queues["in"] = kwargs["in_queue"]
self.queues["out"] = kwargs["out_queue"]
def detect_faces(self, *args, **kwargs):
""" Detect faces in rgb image
Override for specific detector
Must return a list of BoundingBox's"""
try:
if not self.init:
self.initialize(*args, **kwargs)
except ValueError as err:
logger.error(err)
exit(1)
logger.debug("Detecting Faces (args: %s, kwargs: %s)", args, kwargs)
# <<< GET MODEL >>> #
@staticmethod
def get_model(git_model_id, model_filename):
""" Check if model is available, if not, download and unzip it """
if model_filename is None:
logger.debug("No model_filename specified. Returning None")
return None
if git_model_id is None:
logger.debug("No git_model_id specified. Returning None")
return None
cache_path = os.path.join(os.path.dirname(__file__), ".cache")
model = GetModel(model_filename, cache_path, git_model_id)
return model.model_path
# <<< DETECTION WRAPPER >>> #
def run(self, *args, **kwargs):
""" Parent detect process.
This should always be called as the entry point so exceptions
are passed back to parent.
Do not override """
try:
logger.debug("Executing detector run function")
self.detect_faces(*args, **kwargs)
except Exception as err: # pylint: disable=broad-except
logger.error("Caught exception in child process: %s: %s", os.getpid(), str(err))
# Display traceback if in initialization stage
if not self.init.is_set():
logger.exception("Traceback:")
tb_buffer = StringIO()
traceback.print_exc(file=tb_buffer)
logger.trace(tb_buffer.getvalue())
exception = {"exception": (os.getpid(), tb_buffer)}
self.queues["out"].put(exception)
exit(1)
# <<< FINALIZE METHODS>>> #
def finalize(self, output):
""" This should be called as the final task of each plugin
            Performs final processing and puts the result to the out queue """
if isinstance(output, dict):
logger.trace("Item out: %s", {key: val
for key, val in output.items()
if key != "image"})
if self.min_size > 0 and output.get("detected_faces", None):
output["detected_faces"] = self.filter_small_faces(output["detected_faces"])
else:
logger.trace("Item out: %s", output)
self.queues["out"].put(output)
def filter_small_faces(self, detected_faces):
""" Filter out any faces smaller than the min size threshold """
retval = list()
for face in detected_faces:
face_size = (face.width ** 2 + face.height ** 2) ** 0.5
if face_size < self.min_size:
                logger.debug("Removing detected face: (face_size: %s, min_size: %s)",
face_size, self.min_size)
continue
retval.append(face)
return retval
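    # Example (hypothetical face of width 30px and height 40px): the diagonal is
    # (30**2 + 40**2) ** 0.5 = 50, so the face is kept only when min_size <= 50.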
# <<< DETECTION IMAGE COMPILATION METHODS >>> #
def compile_detection_image(self, input_image,
is_square=False, scale_up=False, to_rgb=False, to_grayscale=False):
""" Compile the detection image """
image = input_image.copy()
if to_rgb:
image = image[:, :, ::-1]
elif to_grayscale:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # pylint: disable=no-member
scale = self.set_scale(image, is_square=is_square, scale_up=scale_up)
image = self.scale_image(image, scale)
return [image, scale]
def set_scale(self, image, is_square=False, scale_up=False):
""" Set the scale factor for incoming image """
height, width = image.shape[:2]
if is_square:
if isinstance(self.target, int):
dims = (self.target ** 0.5, self.target ** 0.5)
self.target = dims
source = max(height, width)
target = max(self.target)
else:
source = (width * height) ** 0.5
if isinstance(self.target, tuple):
self.target = self.target[0] * self.target[1]
target = self.target ** 0.5
if scale_up or target < source:
scale = target / source
else:
scale = 1.0
logger.trace("Detector scale: %s", scale)
return scale
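    # Worked example (hypothetical values, is_square=False): a 1280x720 frame with an
    # int target of 250000 pixels gives source = (1280*720)**0.5 ≈ 960 and target = 500,
    # so scale ≈ 0.52 and the frame is downscaled before detection.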
@staticmethod
def scale_image(image, scale):
""" Scale the image """
# pylint: disable=no-member
if scale == 1.0:
return image
height, width = image.shape[:2]
interpln = cv2.INTER_LINEAR if scale > 1.0 else cv2.INTER_AREA
dims = (int(width * scale), int(height * scale))
if scale < 1.0:
logger.trace("Resizing image from %sx%s to %s.",
width, height, "x".join(str(i) for i in dims))
image = cv2.resize(image, dims, interpolation=interpln)
return image
# <<< IMAGE ROTATION METHODS >>> #
@staticmethod
def get_rotation_angles(rotation):
""" Set the rotation angles. Includes backwards compatibility for the
'on' and 'off' options:
- 'on' - increment 90 degrees
- 'off' - disable
- 0 is prepended to the list, as whatever happens, we want to
            scan the image in its upright state """
rotation_angles = [0]
if not rotation or rotation.lower() == "off":
logger.debug("Not setting rotation angles")
return rotation_angles
if rotation.lower() == "on":
rotation_angles.extend(range(90, 360, 90))
else:
passed_angles = [int(angle)
for angle in rotation.split(",")]
if len(passed_angles) == 1:
rotation_step_size = passed_angles[0]
rotation_angles.extend(range(rotation_step_size,
360,
rotation_step_size))
elif len(passed_angles) > 1:
rotation_angles.extend(passed_angles)
logger.debug("Rotation Angles: %s", rotation_angles)
return rotation_angles
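    # Examples (hypothetical rotation strings):
    #   rotation="off"      -> [0]
    #   rotation="on"       -> [0, 90, 180, 270]
    #   rotation="45"       -> [0, 45, 90, 135, 180, 225, 270, 315]
    #   rotation="90,270"   -> [0, 90, 270]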
def rotate_image(self, image, angle):
""" Rotate the image by given angle and return
Image with rotation matrix """
if angle == 0:
return image, None
return self.rotate_image_by_angle(image, angle)
@staticmethod
def rotate_rect(bounding_box, rotation_matrix):
""" Rotate a BoundingBox based on the rotation_matrix"""
logger.trace("Rotating BoundingBox")
bounding_box = rotate_landmarks(bounding_box, rotation_matrix)
return bounding_box
@staticmethod
def rotate_image_by_angle(image, angle,
rotated_width=None, rotated_height=None):
""" Rotate an image by a given angle.
From: https://stackoverflow.com/questions/22041699 """
logger.trace("Rotating image: (angle: %s, rotated_width: %s, rotated_height: %s)",
angle, rotated_width, rotated_height)
height, width = image.shape[:2]
image_center = (width/2, height/2)
rotation_matrix = cv2.getRotationMatrix2D( # pylint: disable=no-member
image_center, -1.*angle, 1.)
if rotated_width is None or rotated_height is None:
abs_cos = abs(rotation_matrix[0, 0])
abs_sin = abs(rotation_matrix[0, 1])
if rotated_width is None:
rotated_width = int(height*abs_sin + width*abs_cos)
if rotated_height is None:
rotated_height = int(height*abs_cos + width*abs_sin)
rotation_matrix[0, 2] += rotated_width/2 - image_center[0]
rotation_matrix[1, 2] += rotated_height/2 - image_center[1]
        logger.trace("Rotated image: (rotation_matrix: %s)", rotation_matrix)
return (cv2.warpAffine(image, # pylint: disable=no-member
rotation_matrix,
(rotated_width, rotated_height)),
rotation_matrix)
# << QUEUE METHODS >> #
def get_item(self):
""" Yield one item from the queue """
item = self.queues["in"].get()
if isinstance(item, dict):
logger.trace("Item in: %s", item["filename"])
else:
logger.trace("Item in: %s", item)
if item == "EOF":
logger.debug("In Queue Exhausted")
# Re-put EOF into queue for other threads
self.queues["in"].put(item)
return item
def get_batch(self):
""" Get items from the queue in batches of
self.batch_size
First item in output tuple indicates whether the
queue is exhausted.
Second item is the batch
Remember to put "EOF" to the out queue after processing
the final batch """
exhausted = False
batch = list()
for _ in range(self.batch_size):
item = self.get_item()
if item == "EOF":
exhausted = True
break
batch.append(item)
logger.trace("Returning batch size: %s", len(batch))
return (exhausted, batch)
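    # Typical consumption pattern in a detector plugin (a sketch, not any plugin's exact code):
    #   while True:
    #       exhausted, batch = self.get_batch()
    #       # ... run detection on `batch` and finalize each item ...
    #       if exhausted:
    #           self.queues["out"].put("EOF")
    #           break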
# <<< MISC METHODS >>> #
@staticmethod
def get_vram_free():
""" Return free and total VRAM on card with most VRAM free"""
stats = GPUStats()
vram = stats.get_card_most_free()
logger.verbose("Using device %s with %sMB free of %sMB",
vram["device"],
int(vram["free"]),
int(vram["total"]))
return int(vram["card_id"]), int(vram["free"]), int(vram["total"])
@staticmethod
def set_predetected(width, height):
""" Set a BoundingBox for predetected faces """
# Predetected_face is used for sort tool.
# Landmarks should not be extracted again from predetected faces,
        # because face data is lost, resulting in a large
do nothing
else:
if not common.isNum(value):
raise MeterException('weight values must be numbers')
try:
totalRatio = self._numerator / self._denominator
except TypeError:
raise MeterException(
'Something wrong with the type of '
+ 'this numerator %s %s or this denominator %s %s' %
(self._numerator, type(self._numerator),
self._denominator, type(self._denominator)))
for mt in self._partition:
# for mt in self:
partRatio = mt._numerator / mt._denominator
mt.weight = value * (partRatio / totalRatio)
# mt.weight = (partRatio/totalRatio) #* totalRatio
# environLocal.printDebug(['setting weight based on part, total, weight',
# partRatio, totalRatio, mt.weight])
@property
def numerator(self):
return self._numerator
@property
def denominator(self):
return self._denominator
def _getFlatList(self):
'''Return a flat version of this MeterSequence as a list of MeterTerminals.
        This returns a list and not a new MeterSequence b/c MeterSequence objects
are generally immutable and thus it does not make sense
to concatenate them.
>>> a = meter.MeterSequence('3/4')
>>> a.partition(3)
>>> b = a._getFlatList()
>>> len(b)
3
>>> a[1] = a[1].subdivide(4)
>>> a
<music21.meter.core.MeterSequence {1/4+{1/16+1/16+1/16+1/16}+1/4}>
>>> len(a)
3
>>> b = a._getFlatList()
>>> len(b)
6
>>> a[1][2] = a[1][2].subdivide(4)
>>> a
<music21.meter.core.MeterSequence {1/4+{1/16+1/16+{1/64+1/64+1/64+1/64}+1/16}+1/4}>
>>> b = a._getFlatList()
>>> len(b)
9
'''
mtList = []
for obj in self._partition:
if not isinstance(obj, MeterSequence):
mtList.append(obj)
else: # its a meter sequence
mtList += obj._getFlatList()
return mtList
@property
def flat(self):
'''
Return a new MeterSequence composed of the flattened representation.
>>> ms = meter.MeterSequence('3/4', 3)
>>> b = ms.flat
>>> len(b)
3
>>> ms[1] = ms[1].subdivide(4)
>>> b = ms.flat
>>> len(b)
6
>>> ms[1][2] = ms[1][2].subdivide(4)
>>> ms
<music21.meter.core.MeterSequence {1/4+{1/16+1/16+{1/64+1/64+1/64+1/64}+1/16}+1/4}>
>>> b = ms.flat
>>> len(b)
9
'''
post = MeterSequence()
post.load(self._getFlatList())
return post
@property
def flatWeight(self):
'''
Return a list of flat weight values
'''
post = []
for mt in self._getFlatList():
post.append(mt.weight)
return post
@property
def depth(self):
'''
Return how many unique levels deep this part is
This should be optimized to store values unless the structure has changed.
'''
depth = 0 # start with 0, will count this level
lastMatch = None
while True:
test = self.getLevelList(depth)
if test != lastMatch:
depth += 1
lastMatch = test
else:
break
return depth
def isUniformPartition(self, *, depth=0):
# noinspection PyShadowingNames
'''
Return True if the top-level partitions (if depth=0)
or a lower-level section has equal durations
>>> ms = meter.MeterSequence('3/8+2/8+3/4')
>>> ms.isUniformPartition()
False
>>> ms = meter.MeterSequence('4/4')
>>> ms.isUniformPartition()
True
>>> ms.partition(4)
>>> ms.isUniformPartition()
True
>>> ms[0] = ms[0].subdivideByCount(2)
>>> ms[1] = ms[1].subdivideByCount(4)
>>> ms.isUniformPartition()
True
>>> ms.isUniformPartition(depth=1)
False
>>> ms = meter.MeterSequence('2/4+2/4')
>>> ms.isUniformPartition()
True
>>> ms = meter.MeterSequence('5/8', 5)
>>> ms.isUniformPartition()
True
>>> ms.partition(2)
>>> ms.isUniformPartition()
False
Changed in v7 -- depth is keyword only
'''
n = []
d = []
for ms in self.getLevelList(depth):
if ms.numerator not in n:
n.append(ms.numerator)
if ms.denominator not in d:
d.append(ms.denominator)
            # as soon as we have more than one entry, the partition is not uniform
if len(n) > 1 or len(d) > 1:
return False
return True
# --------------------------------------------------------------------------
# alternative representations
def getLevelList(self, levelCount, flat=True):
'''
Recursive utility function that gets everything at a certain level.
>>> b = meter.MeterSequence('4/4', 4)
>>> b[1] = b[1].subdivide(2)
>>> b[3] = b[3].subdivide(2)
>>> b[3][0] = b[3][0].subdivide(2)
>>> b
<music21.meter.core.MeterSequence {1/4+{1/8+1/8}+1/4+{{1/16+1/16}+1/8}}>
>>> b.getLevelList(0)
[<music21.meter.core.MeterTerminal 1/4>,
<music21.meter.core.MeterTerminal 1/4>,
<music21.meter.core.MeterTerminal 1/4>,
<music21.meter.core.MeterTerminal 1/4>]
>>> meter.MeterSequence(b.getLevelList(0))
<music21.meter.core.MeterSequence {1/4+1/4+1/4+1/4}>
>>> meter.MeterSequence(b.getLevelList(1))
<music21.meter.core.MeterSequence {1/4+1/8+1/8+1/4+1/8+1/8}>
>>> meter.MeterSequence(b.getLevelList(2))
<music21.meter.core.MeterSequence {1/4+1/8+1/8+1/4+1/16+1/16+1/8}>
>>> meter.MeterSequence(b.getLevelList(3))
<music21.meter.core.MeterSequence {1/4+1/8+1/8+1/4+1/16+1/16+1/8}>
'''
cacheKey = (levelCount, flat)
try: # check in cache
return self._levelListCache[cacheKey]
except KeyError:
pass
mtList = []
for i in range(len(self._partition)):
# environLocal.printDebug(['getLevelList weight', i, self[i].weight])
if not isinstance(self._partition[i], MeterSequence):
mt = self[i] # a meter terminal
mtList.append(mt)
else: # its a sequence
if levelCount > 0: # retain this sequence but get lower level
# reduce level by 1 when recursing; do not
# change levelCount here
mtList += self._partition[i].getLevelList(
levelCount - 1, flat)
else: # level count is at zero
if flat: # make sequence into a terminal
mt = MeterTerminal('%s/%s' % (
self._partition[i].numerator, self._partition[i].denominator))
# set weight to that of the sequence
mt.weight = self._partition[i].weight
mtList.append(mt)
else: # its not a terminal, its a meter sequence
mtList.append(self._partition[i])
# store in cache
self._levelListCache[cacheKey] = mtList
return mtList
def getLevel(self, level=0, flat=True):
'''
Return a complete MeterSequence with the same numerator/denominator
relationship but that represents any partitions found at the requested
level. A sort of flatness with variable depth.
>>> b = meter.MeterSequence('4/4', 4)
>>> b[1] = b[1].subdivide(2)
>>> b[3] = b[3].subdivide(2)
>>> b[3][0] = b[3][0].subdivide(2)
>>> b
<music21.meter.core.MeterSequence {1/4+{1/8+1/8}+1/4+{{1/16+1/16}+1/8}}>
>>> b.getLevel(0)
<music21.meter.core.MeterSequence {1/4+1/4+1/4+1/4}>
>>> b.getLevel(1)
<music21.meter.core.MeterSequence {1/4+1/8+1/8+1/4+1/8+1/8}>
>>> b.getLevel(2)
<music21.meter.core.MeterSequence {1/4+1/8+1/8+1/4+1/16+1/16+1/8}>
'''
return MeterSequence(self.getLevelList(level, flat))
def getLevelSpan(self, level=0):
'''
For a given level, return the time span of each terminal or sequence
>>> b = meter.MeterSequence('4/4', 4)
>>> b[1] = b[1].subdivide(2)
>>> b[3] = b[3].subdivide(2)
>>> b[3][0] = b[3][0].subdivide(2)
>>> b
<music21.meter.core.MeterSequence {1/4+{1/8+1/8}+1/4+{{1/16+1/16}+1/8}}>
>>> b.getLevelSpan(0)
[(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (3.0, 4.0)]
>>> b.getLevelSpan(1)
[(0.0, 1.0), (1.0, 1.5), (1.5, 2.0), (2.0, 3.0), (3.0, 3.5), (3.5, 4.0)]
>>> b.getLevelSpan(2)
[(0.0, 1.0), (1.0, 1.5), (1.5, 2.0), (2.0, 3.0), (3.0, 3.25), (3.25, 3.5), (3.5, 4.0)]
'''
ms = self.getLevelList(level, flat=True)
mapping = []
pos = 0.0
for i in range(len(ms)):
start = pos
end = opFrac(pos + ms[i].duration.quarterLength)
mapping.append((start, end))
pos = end
return mapping
def getLevelWeight(self, level=0):
'''
The weightList is an array of weights found in the components.
The MeterSequence has a ._weight attribute, but it is not used here
>>> a = meter.MeterSequence('4/4', 4)
>>> a.getLevelWeight()
[0.25, 0.25, 0.25, 0.25]
>>> b = meter.MeterSequence('4/4', 4)
>>> b.getLevelWeight(0)
[0.25, 0.25, 0.25, 0.25]
>>> b[1] = b[1].subdivide(2)
>>> b[3] = b[3].subdivide(2)
>>> b.getLevelWeight(0)
[0.25, 0.25, 0.25, 0.25]
>>> b[3][0] = b[3][0].subdivide(2)
>>> b
<music21.meter.core.MeterSequence {1/4+{1/8+1/8}+1/4+{{1/16+1/16}+1/8}}>
>>> b.getLevelWeight(0)
[0.25, 0.25, 0.25, 0.25]
>>> b.getLevelWeight(1)
[0.25, 0.125, 0.125, 0.25, 0.125, 0.125]
>>> b.getLevelWeight(2)
[0.25, 0.125, 0.125, 0.25, 0.0625, 0.0625, 0.125]
'''
post = []
for mt in self.getLevelList(level):
post.append(mt.weight)
return post
def setLevelWeight(self, weightList, level=0):
'''
The `weightList` is an array of weights to be applied to a
single level of the MeterSequence.
>>> a = meter.MeterSequence('4/4', 4)
>>> a.setLevelWeight([1, 2, 3, 4])
>>> a.getLevelWeight()
[1, 2, 3, 4]
>>> b = meter.MeterSequence('4/4', 4)
>>> b.setLevelWeight([2, 3])
>>> b.getLevelWeight(0)
[2, 3, 2, 3]
>>> b[1] = b[1].subdivide(2)
>>> b[3] = b[3].subdivide(2)
>>> b.getLevelWeight(0)
[2, 3.0, 2, 3.0]
>>> b[3][0] = b[3][0].subdivide(2)
>>> b
<music21.meter.core.MeterSequence {1/4+{1/8+1/8}+1/4+{{1/16+1/16}+1/8}}>
>>> b.getLevelWeight(0)
[2, 3.0, 2, 3.0]
>>> b.getLevelWeight(1)
[2, 1.5, 1.5, 2, 1.5, 1.5]
>>> b.getLevelWeight(2)
[2, 1.5, 1.5, 2, 0.75, 0.75, 1.5]
'''
levelObjs = self.getLevelList(level)
for i in range(len(levelObjs)):
mt = levelObjs[i]
mt.weight = weightList[i % len(weightList)]
# --------------------------------------------------------------------------
# given a quarter note position, return the active index
def offsetToIndex(self, qLenPos, includeCoincidentBoundaries=False) -> int:
'''
Given an offset in quarterLengths (0.0 through self.duration.quarterLength), return
the index of the active MeterTerminal or MeterSequence
>>> a = meter.MeterSequence('4/4')
>>> a.offsetToIndex(0.5)
0
>>> a.offsetToIndex(3.5)
0
>>> a.partition(4)
>>> a.offsetToIndex(0.5)
0
>>> a.offsetToIndex(3.5)
3
>>> a.partition([1, 2, 1])
>>> len(a)
3
>>> a.offsetToIndex(2.9)
1
>>> a[a.offsetToIndex(2.9)]
<music21.meter.core.MeterTerminal 2/4>
>>> a = meter.MeterSequence('4/4')
>>> a.offsetToIndex(5.0)
Traceback (most recent call last):
music21.exceptions21.MeterException: cannot access from qLenPos 5.0
where total duration is 4.0
Negative numbers also raise an exception:
>>> a.offsetToIndex(-0.5)
Traceback (most recent call last):
music21.exceptions21.MeterException: cannot access from qLenPos -0.5
where total duration is 4.0
'''
if qLenPos >= self.duration.quarterLength or qLenPos < 0:
raise MeterException(
f'cannot access from qLenPos {qLenPos} '
+ f'where total duration is {self.duration.quarterLength}'
)
qPos = 0
match = None
for i in range(len(self)):
start = qPos
end = opFrac(qPos + self[i].duration.quarterLength)
# if adjoining ends are permitted, first match is found
if includeCoincidentBoundaries:
if start <= qLenPos <= end:
match = i
break
else:
# note that this is >=, meaning that the first boundary
# is coincident
if start <= qLenPos < end:
match = i
break
qPos = opFrac(qPos + self[i].duration.quarterLength)
return match
def offsetToAddress(self, qLenPos, includeCoincidentBoundaries=False):
import numpy as np
import pandas as pd
import os
def generate_x(x_file):
with open(x_file) as f:
container = f.readlines()
result = []
for line in container:
tmp1 = line.strip()
tmp2 = tmp1.replace(' ', ' ')
# print tmp2
tmp_ary = map(float, tmp2.split(' '))
# nan_count = sum(math.isnan(x) for x in tmp_ary)
# if (tmp_ary[243] != 0.0) & (nan_count < 10):
if (tmp_ary[243] != 0.0):
result.append(tmp_ary)
return np.array(result)
def sel_columns_raw(df):
# Raw denotes lower body sensor data.
# See 'def sel_columns_lower(df)'.
knee_1 = df[[1, 2, 3]].values.flatten()
knee_2 = df[[19, 20, 21]].values.flatten()
hip = df[[4, 5, 6]].values.flatten()
left_1 = df[[102, 103, 104]].values.flatten()
left_2 = df[[105, 106, 107]].values.flatten()
left_3 = df[[108, 109, 110]].values.flatten()
left_4 = df[[111, 112, 113]].values.flatten()
left_5 = df[[114, 115, 116]].values.flatten()
right_1 = df[[118, 119, 120]].values.flatten()
right_2 = df[[121, 122, 123]].values.flatten()
right_3 = df[[124, 125, 126]].values.flatten()
right_4 = df[[127, 128, 129]].values.flatten()
right_5 = df[[130, 131, 132]].values.flatten()
data = np.concatenate((knee_1, knee_2, hip,
left_1, left_2, left_3, left_4, left_5,
right_1, right_2, right_3, right_4, right_5))
# print data.shape
return data
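# Feature count check (with the window of 15 rows used later in this script):
# 13 sensor groups * 3 axes = 39 columns, flattened over 15 timesteps = 585 values per window.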
def sel_columns_upper(df):
lua_up = df[[7, 8, 9]]
lua_bottom = df[[28, 29, 30]]
lua_1 = df[[76, 77, 78]]
lua_2 = df[[79, 80, 81]]
lua_3 = df[[82, 83, 84]]
lla_1 = df[[89, 90, 91]]
lla_2 = df[[92, 93, 94]]
lla_3 = df[[95, 96, 97]]
lwr = df[[31, 32, 33]]
lh = df[[13, 14, 15]]
rua_up = df[[25, 26, 27]]
rua_bottom = df[[10, 11, 12]]
rua_1 = df[[50, 51, 52]]
rua_2 = df[[53, 54, 55]]
rua_3 = df[[56, 57, 58]]
rla_1 = df[[63, 64, 65]]
rla_2 = df[[66, 67, 68]]
rla_3 = df[[69, 70, 71]]
#rwr = df[[22, 23, 24]]
#rh = df[[34, 35, 36]]
lua_up_mean = np.mean(lua_up, axis=0)
lua_up_std = np.std(lua_up, axis=0)
lua_up_min = np.min(lua_up, axis=0)
lua_up_max = np.max(lua_up, axis=0)
lua_bottom_mean = np.mean(lua_bottom, axis=0)
lua_bottom_std = np.std(lua_bottom, axis=0)
lua_bottom_min = np.min(lua_bottom, axis=0)
lua_bottom_max = np.max(lua_bottom, axis=0)
lua_1_mean = np.mean(lua_1, axis=0)
lua_1_std = np.std(lua_1, axis=0)
lua_1_min = np.min(lua_1, axis=0)
lua_1_max = np.max(lua_1, axis=0)
lua_2_mean = np.mean(lua_2, axis=0)
lua_2_std = np.std(lua_2, axis=0)
lua_2_min = np.min(lua_2, axis=0)
lua_2_max = np.max(lua_2, axis=0)
lua_3_mean = np.mean(lua_3, axis=0)
lua_3_std = np.std(lua_3, axis=0)
lua_3_min = np.min(lua_3, axis=0)
lua_3_max = np.max(lua_3, axis=0)
lla_1_mean = np.mean(lla_1, axis=0)
lla_1_std = np.std(lla_1, axis=0)
lla_1_min = np.min(lla_1, axis=0)
lla_1_max = np.max(lla_1, axis=0)
lla_2_mean = np.mean(lla_2, axis=0)
lla_2_std = np.std(lla_2, axis=0)
lla_2_min = np.min(lla_2, axis=0)
lla_2_max = np.max(lla_2, axis=0)
lla_3_mean = np.mean(lla_3, axis=0)
lla_3_std = np.std(lla_3, axis=0)
lla_3_min = np.min(lla_3, axis=0)
lla_3_max = np.max(lla_3, axis=0)
lwr_mean = np.mean(lwr, axis=0)
lwr_std = np.std(lwr, axis=0)
lwr_min = np.min(lwr, axis=0)
lwr_max = np.max(lwr, axis=0)
lh_mean = np.mean(lh, axis=0)
lh_std = np.std(lh, axis=0)
lh_min = np.min(lh, axis=0)
lh_max = np.max(lh, axis=0)
rua_up_mean = np.mean(rua_up, axis=0)
rua_up_std = np.std(rua_up, axis=0)
rua_up_min = np.min(rua_up, axis=0)
rua_up_max = np.max(rua_up, axis=0)
rua_bottom_mean = np.mean(rua_bottom, axis=0)
rua_bottom_std = np.std(rua_bottom, axis=0)
rua_bottom_min = np.min(rua_bottom, axis=0)
rua_bottom_max = np.max(rua_bottom, axis=0)
rua_1_mean = np.mean(rua_1, axis=0)
rua_1_std = np.std(rua_1, axis=0)
rua_1_min = np.min(rua_1, axis=0)
rua_1_max = np.max(rua_1, axis=0)
rua_2_mean = np.mean(rua_2, axis=0)
rua_2_std = np.std(rua_2, axis=0)
rua_2_min = np.min(rua_2, axis=0)
rua_2_max = np.max(rua_2, axis=0)
rua_3_mean = np.mean(rua_3, axis=0)
rua_3_std = np.std(rua_3, axis=0)
rua_3_min = np.min(rua_3, axis=0)
rua_3_max = np.max(rua_3, axis=0)
rla_1_mean = np.mean(rla_1, axis=0)
rla_1_std = np.std(rla_1, axis=0)
rla_1_min = np.min(rla_1, axis=0)
rla_1_max = np.max(rla_1, axis=0)
rla_2_mean = np.mean(rla_2, axis=0)
rla_2_std = np.std(rla_2, axis=0)
rla_2_min = np.min(rla_2, axis=0)
rla_2_max = np.max(rla_2, axis=0)
rla_3_mean = np.mean(rla_3, axis=0)
rla_3_std = np.std(rla_3, axis=0)
rla_3_min = np.min(rla_3, axis=0)
rla_3_max = np.max(rla_3, axis=0)
#rwr_mean = np.mean(rwr, axis=0)
#rwr_std = np.std(rwr, axis=0)
#rwr_min = np.min(rwr, axis=0)
#rwr_max = np.max(rwr, axis=0)
#rh_mean = np.mean(rh, axis=0)
#rh_std = np.std(rh, axis=0)
#rh_min = np.min(rh, axis=0)
#rh_max = np.max(rh, axis=0)
data = np.concatenate((lua_up_mean, lua_up_std, lua_up_min, lua_up_max,
lua_bottom_mean, lua_bottom_std, lua_bottom_min, lua_bottom_max,
lua_1_mean, lua_1_std, lua_1_min, lua_1_max,
lua_2_mean, lua_2_std, lua_2_min, lua_2_max,
lua_3_mean, lua_3_std, lua_3_min, lua_3_max,
lla_1_mean, lla_1_std, lla_1_min, lla_1_max,
lla_2_mean, lla_2_std, lla_2_min, lla_2_max,
lla_3_mean, lla_3_std, lla_3_min, lla_3_max,
lwr_mean, lwr_std, lwr_min, lwr_max,
lh_mean, lh_std, lh_min, lh_max,
rua_up_mean, rua_up_std, rua_up_min, rua_up_max,
rua_bottom_mean, rua_bottom_std, rua_bottom_min, rua_bottom_max,
rua_1_mean, rua_1_std, rua_1_min, rua_1_max,
rua_2_mean, rua_2_std, rua_2_min, rua_2_max,
rua_3_mean, rua_3_std, rua_3_min, rua_3_max,
rla_1_mean, rla_1_std, rla_1_min, rla_1_max,
rla_2_mean, rla_2_std, rla_2_min, rla_2_max,
rla_3_mean, rla_3_std, rla_3_min, rla_3_max,
#rwr_mean, rwr_std, rwr_min, rwr_max,
#rh_mean, rh_std, rh_min, rh_max
))
'''
    The following two features were removed due to many missing values.
rwr : right wrist
rh : right hand
Executed the following command to find missing value features/sensors:
> nan_valid = np.argwhere(np.isnan(X_valid))
    > np.unique(nan_valid[:, 1])
'''
# print data.shape
return data
def sel_columns_lower(df):
knee_1 = df[[1, 2, 3]]
knee_2 = df[[19, 20, 21]]
hip = df[[4, 5, 6]]
left_1 = df[[102, 103, 104]]
left_2 = df[[105, 106, 107]]
left_3 = df[[108, 109, 110]]
left_4 = df[[111, 112, 113]]
left_5 = df[[114, 115, 116]]
right_1 = df[[118, 119, 120]]
right_2 = df[[121, 122, 123]]
right_3 = df[[124, 125, 126]]
right_4 = df[[127, 128, 129]]
right_5 = df[[130, 131, 132]]
knee_1_mean = np.mean(knee_1, axis=0)
knee_1_std = np.std(knee_1, axis=0)
knee_1_min = np.min(knee_1, axis=0)
knee_1_max = np.max(knee_1, axis=0)
knee_2_mean = np.mean(knee_2, axis=0)
knee_2_std = np.std(knee_2, axis=0)
knee_2_min = np.min(knee_2, axis=0)
knee_2_max = np.max(knee_2, axis=0)
hip_mean = np.mean(hip, axis=0)
hip_std = np.std(hip, axis=0)
hip_min = np.min(hip, axis=0)
hip_max = np.max(hip, axis=0)
left_1_mean = np.mean(left_1, axis=0)
left_1_std = np.std(left_1, axis=0)
left_1_min = np.min(left_1, axis=0)
left_1_max = np.max(left_1, axis=0)
left_2_mean = np.mean(left_2, axis=0)
left_2_std = np.std(left_2, axis=0)
left_2_min = np.min(left_2, axis=0)
left_2_max = np.max(left_2, axis=0)
left_3_mean = np.mean(left_3, axis=0)
left_3_std = np.std(left_3, axis=0)
left_3_min = np.min(left_3, axis=0)
left_3_max = np.max(left_3, axis=0)
left_4_mean = np.mean(left_4, axis=0)
left_4_std = np.std(left_4, axis=0)
left_4_min = np.min(left_4, axis=0)
left_4_max = np.max(left_4, axis=0)
left_5_mean = np.mean(left_5, axis=0)
left_5_std = np.std(left_5, axis=0)
left_5_min = np.min(left_5, axis=0)
left_5_max = np.max(left_5, axis=0)
right_1_mean = np.mean(right_1, axis=0)
right_1_std = np.std(right_1, axis=0)
right_1_min = np.min(right_1, axis=0)
right_1_max = np.max(right_1, axis=0)
right_2_mean = np.mean(right_2, axis=0)
right_2_std = np.std(right_2, axis=0)
right_2_min = np.min(right_2, axis=0)
right_2_max = np.max(right_2, axis=0)
right_3_mean = np.mean(right_3, axis=0)
right_3_std = np.std(right_3, axis=0)
right_3_min = np.min(right_3, axis=0)
right_3_max = np.max(right_3, axis=0)
right_4_mean = np.mean(right_4, axis=0)
right_4_std = np.std(right_4, axis=0)
right_4_min = np.min(right_4, axis=0)
right_4_max = np.max(right_4, axis=0)
right_5_mean = np.mean(right_5, axis=0)
right_5_std = np.std(right_5, axis=0)
right_5_min = np.min(right_5, axis=0)
right_5_max = np.max(right_5, axis=0)
data = np.concatenate((knee_1_mean, knee_1_std, knee_1_min, knee_1_max,
knee_2_mean, knee_2_std, knee_2_min, knee_2_max,
hip_mean, hip_std, hip_min, hip_max,
right_1_mean, right_1_std, right_1_min, right_1_max,
right_2_mean, right_2_std, right_2_min, right_2_max,
right_3_mean, right_3_std, right_3_min, right_3_max,
right_4_mean, right_4_std, right_4_min, right_4_max,
right_5_mean, right_5_std, right_5_min, right_5_max,
left_1_mean, left_1_std, left_1_min, left_1_max,
left_2_mean, left_2_std, left_2_min, left_2_max,
left_3_mean, left_3_std, left_3_min, left_3_max,
left_4_mean, left_4_std, left_4_min, left_4_max,
left_5_mean, left_5_std, left_5_min, left_5_max))
# print data.shape
return data
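# Feature count check: 13 sensor groups * 4 statistics (mean, std, min, max) * 3 axes
# = 156 values per window, independent of the window length.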
# Make new directory under '../data/OpportunityUCIDataset' folder
processed_dir = "../data/OpportunityUCIDataset/processed"
if not os.path.exists(processed_dir):
os.makedirs(processed_dir)
# Generate training / validation / test data
# files_drill = ['S1-Drill.dat', 'S2-Drill.dat', 'S3-Drill.dat'] # We do not use Drill data.
files_train = ['S2-ADL1.dat', 'S2-ADL2.dat', 'S3-ADL1.dat', 'S3-ADL2.dat', 'S1-ADL1.dat', 'S1-ADL2.dat', 'S1-ADL3.dat']
files_valid = ['S2-ADL3.dat', 'S3-ADL3.dat', 'S1-ADL4.dat', 'S1-ADL5.dat']
files_test = ['S2-ADL4.dat', 'S2-ADL5.dat', 'S3-ADL4.dat', 'S3-ADL5.dat']
data_type = ["raw", "upper", "lower"]
file_type = {"train": files_train, "valid": files_valid, "test": files_test}
dir_in = "/home/hcilab/Documents/HAR/Opportunity/OpportunityUCIDataset/dataset/"
dir_out = "../data/OpportunityUCIDataset/processed/"
for data_type_sel in data_type:
print "------ {} data ------".format(data_type_sel)
for k, v in file_type.iteritems():
X = pd.DataFrame()
y = pd.Series()
for in_file in v:
data = generate_x(dir_in + in_file)
X = X.append(pd.DataFrame(data))
print X.shape
# print X.columns
X_1 = X.loc[X[243] == 1.0]
print "{}_101:".format(k), X_1.shape
X_2 = X.loc[X[243] == 2.0]
print "{}_102:".format(k), X_2.shape
X_3 = X.loc[X[243] == 4.0]
print "{}_104:".format(k), X_3.shape
X_4 = X.loc[X[243] == 5.0]
print "{}_105:".format(k), X_4.shape
X_1 = X_1.fillna(method='ffill')
# print X_1.isnull().sum().values
X_2 = X_2.fillna(method='ffill')
# print X_1.isnull().sum().values
X_3 = X_3.fillna(method='ffill')
# print X_1.isnull().sum().values
X_4 = X_4.fillna(method='ffill')
# print X_1.isnull().sum().values
four_activities_data = [X_1, X_2, X_3, X_4]
window = 15
sliding = 7
data_container = []
label_container = []
        for activity in four_activities_data:
            # slide a window of `window` rows forward by `sliding` rows each step
            for start in range(0, activity.shape[0] - window + 1, sliding):
                df = activity.iloc[start:start + window]
                label = df[243].astype(int).values
                label_container.append(label[-1])
                if data_type_sel == "raw":
                    data_container.append(sel_columns_raw(df))
                if data_type_sel == "upper":
                    data_container.append(sel_columns_upper(df))
                if data_type_sel == "lower":
                    data_container.append(sel_columns_lower(df))
print "{}_data:".format(k), np.asarray(data_container).shape
print "{}_label:".format(k), np.asarray(label_container).shape
np.save(dir_out + "{}_{}_X.npy".format(data_type_sel, k), np.asarray(data_container))
np.save(dir_out + "{}_{}_y.npy".format(data_type_sel, k), np.asarray(label_container))
# Note that raw indicates raw 'lower' body data.
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/generate_data.py
------
#!/usr/bin/env python
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5_cccl import bigip
from f5.bigip import ManagementRoot
import json
from mock import Mock, patch
import pytest
class MockNode():
"""A mock BIG-IP node."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the node object."""
pass
def delete(self):
"""Delete the node object."""
pass
def load(self, name=None, partition=None):
"""Load the node object."""
return MockNode(name)
class Pool():
"""A mock BIG-IP Pool."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the pool object."""
pass
def delete(self):
"""Delete the pool object."""
pass
def load(self, name=None, partition=None):
"""Load the pool object."""
return Pool(name)
class Policy():
"""A mock BIG-IP Policy."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the policy object."""
pass
def delete(self):
"""Delete the policy object."""
pass
def load(self, name=None, partition=None):
"""Load the policy object."""
return Policy(name)
class IRule():
"""A mock BIG-IP iRule."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the iRule object."""
pass
def delete(self):
"""Delete the iRule object."""
pass
def load(self, name=None, partition=None):
"""Load the iRule object."""
return IRule(name)
class VirtualAddress():
"""A mock BIG-IP VirtualAddress."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, partition=None, name=None, **kwargs):
"""Create the virtual address object."""
pass
def delete(self):
"""Delete the virtual address object."""
pass
def load(self, name=None, partition=None):
"""Load the virtual address object."""
return VirtualAddress(name)
class Member():
"""A mock BIG-IP Pool Member."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
self.session = kwargs.get('session', None)
if kwargs.get('state', None) == 'user-up':
self.state = 'up'
else:
self.state = 'user-down'
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
class Profiles():
"""A container of Virtual Server Profiles."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.profiles = kwargs.get('profiles', [])
def exists(self, name, partition):
"""Check for the existance of a profile."""
for p in self.profiles:
if p['name'] == name and p['partition'] == partition:
return True
return False
def create(self, name, partition):
"""Placeholder: This will be mocked."""
pass
class ProfileSet():
"""A set of Virtual Server Profiles."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.profiles = Profiles(**kwargs)
class Policies():
"""A container of Virtual Server Policies."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.policies = kwargs.get('policies', [])
def exists(self, name, partition):
"""Check for the existance of a policy."""
for p in self.policies:
if p['name'] == name and p['partition'] == partition:
return True
return False
def create(self, name, partition):
"""Placeholder: This will be mocked."""
pass
class PolicySet():
"""A set of Virtual Server Policies."""
def __init__(self, **kwargs):
"""Initialize the object."""
self.policies = Policies(**kwargs)
class Virtual():
"""A mock BIG-IP Virtual Server."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.profiles_s = ProfileSet(**kwargs)
self.policies_s = PolicySet(**kwargs)
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the virtual object."""
pass
def delete(self):
"""Delete the virtual object."""
pass
def load(self, name=None, partition=None):
"""Load the virtual object."""
return Virtual(name)
class HealthCheck():
"""A mock BIG-IP Health Monitor."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
self.interval = kwargs.get('interval', None)
self.timeout = kwargs.get('timeout', None)
self.send = kwargs.get('send', None)
self.partition = kwargs.get('partition', None)
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def delete(self):
"""Delete the healthcheck object."""
pass
class Arp():
"""A mock BIG-IP Arp entry."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the ARP object."""
pass
def delete(self):
"""Delete the ARP object."""
pass
def load(self, name=None, partition=None):
"""Load the ARP object."""
return Arp(name)
class FDBTunnel():
"""A mock BIG-IP FDB tunnel entry."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, partition=None, **kwargs):
"""Create the FDB tunnel object."""
pass
def delete(self):
"""Delete the FDB tunnel object."""
pass
def load(self, name=None, partition=None):
"""Load the FDB tunnel object."""
return FDBTunnel(name)
class Partition():
"""A mock BIG-IP Partition."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
if kwargs.get('default-route-domain') is not None:
self.defaultRouteDomain = kwargs.get('default-route-domain')
else:
self.defaultRouteDomain = 0
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def create(self, name=None, **kwargs):
"""Create the partition object."""
pass
def delete(self):
"""Delete the partition object."""
pass
def load(self, name=None):
"""Load the partition object."""
return Partition(name)
class MockPartitions():
"""A mock Auth partitions object."""
def __init__(self):
"""Initialize the object."""
self.partition = Partition('test')
def get_collection(self):
"""Get collection of partitions."""
pass
class MockService():
"""A mock Services service object."""
def __init__(self):
"""Initialize the object."""
pass
def load(self, name, partition):
"""Load a mock iapp."""
return MockService()
def create(self, name=None, template=None, partition=None, variables=None,
tables=None, trafficGroup=None, description=None):
"""Create a mock iapp."""
pass
def update(self, **properties):
"""Update a mock iapp."""
pass
def delete(self):
"""Delete the iapp object."""
pass
class MockServices():
"""A mock Application services object."""
def __init__(self):
"""Initialize the object."""
self.service = MockService()
def get_collection(self):
"""Get collection of iapps."""
return []
class MockApplication():
"""A mock Sys application object."""
def __init__(self):
"""Initialize the object."""
self.services = MockServices()
class MockFolders():
"""A mock Sys folders object."""
def __init__(self):
"""Initialize the object."""
def get_collection(self):
"""Get collection of partitions."""
pass
class MockSys():
"""A mock BIG-IP sys object."""
def __init__(self):
"""Initialize the object."""
self.application = MockApplication()
self.folders = MockFolders()
class Iapp():
"""A mock BIG-IP iapp object."""
def __init__(self, name=None, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def delete(self):
"""Mock delete method."""
pass
def update(self, executeAction=None, name=None, partition=None,
variables=None, tables=None, **kwargs):
"""Mock update method."""
pass
class InternalDataGroup():
"""A mock BIG-IP data_group internal."""
def __init__(self, name, **kwargs):
"""Initialize the object."""
self.name = name
#self.partition = partition
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def modify(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
def update(self, **kwargs):
"""Placeholder: This will be mocked."""
pass
    def create(self, partition=None, name=None, **kwargs):
        """Create the internal data group object."""
        pass
    def delete(self):
        """Delete the internal data group object."""
        pass
    def load(self, name=None, partition=None):
        """Load the internal data group object."""
        return InternalDataGroup(name, partition=partition)
class MockFolder():
"""A mock BIG-IP folder object."""
def __init__(self, name):
"""Initialize the object."""
self.name = name
class MockHttp():
"""A mock Http http object."""
def __init__(self, name=None, **kwargs):
"""Initialize the object."""
self.name = name
for key in kwargs:
setattr(self, key, kwargs[key])
self.raw = self.__dict__
def create(self, partition=None, **kwargs):
"""Create a http healthcheck object."""
pass
def delete(self):
"""Delete the monitor object."""
pass
    def load(self, name=None, partition=None):
        """Load the monitor object."""
        return MockHttp(name)
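# Illustrative sketch (added; not part of the original helpers): how these mocks are
# typically combined with mock.patch in a test. The names below are assumptions.
def _example_pool_modify_is_mocked():
    pool = Pool('pool1', partition='test')
    with patch.object(Pool, 'modify') as mock_modify:
        pool.modify(loadBalancingMode='round-robin')
        mock_modify.assert_called_once_with(loadBalancingMode='round-robin')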
# File: train.py
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import pyramidnet as PYRM
import utils
import numpy as np
import torchvision.utils
from torchvision.utils import save_image
import warnings
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from function import calc_mean_std
from torch.utils.tensorboard import SummaryWriter
import net_cutmix
import net_mixup
from function import adaptive_instance_normalization, coral
import torch.nn.functional as F
from IPython import embed
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from datetime import datetime
warnings.filterwarnings("ignore")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='StyleMix CIFAR-10, CIFAR-100 training code')
parser.add_argument('--net_type', default='pyramidnet', type=str,
help='networktype: pyramidnet')
parser.add_argument('-j', '--workers', default=40, type=int, metavar='N',
                    help='number of data loading workers (default: 40)')
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run') # 250
parser.add_argument('-b', '--batch_size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.25, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=1, type=int,
                    metavar='N', help='print frequency (default: 1)')
parser.add_argument('--depth', default=18, type=int,
                    help='depth of the network (default: 18)')
parser.add_argument('--no-bottleneck', dest='bottleneck', action='store_false',
help='to use basicblock for CIFAR datasets (default: bottleneck)')
parser.add_argument('--dataset', dest='dataset', default='cifar100', type=str,
help='dataset (options: cifar10, cifar100)')
parser.add_argument('--no-verbose', dest='verbose', action='store_false',
help='to print the status at every iteration')
parser.add_argument('--alpha', default=200, type=float,
help='number of new channel increases per depth (default: 200)')
parser.add_argument('--expname', default='PyraNet200', type=str,
help='name of experiment')
parser.add_argument('--vgg', type=str, default='./models/vgg_normalised.pth')
parser.add_argument('--decoder', type=str, default='./models/decoder.pth.tar')
parser.add_argument('--prob', default=0.5, type=float)
parser.add_argument('--r', default=0.7, type=float)
parser.add_argument('--alpha1', default=1.0, type=float)
parser.add_argument('--alpha2', default=1.0, type=float)
parser.add_argument('--delta', default=3.0, type=float)
parser.add_argument('--method', type=str, default='StyleCutMix_Auto_Gamma', help='StyleCutMix_Auto_Gamma, StyleCutMix, StyleMix')
parser.add_argument('--save_dir', type=str, default='/write/your/save/dir')
parser.add_argument('--data_dir', type=str, default='/write/your/data/dir')
parser.set_defaults(bottleneck=True)
parser.set_defaults(verbose=True)
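# Example invocation (illustrative; paths are placeholders matching the defaults above):
#   python train.py --net_type pyramidnet --dataset cifar100 --depth 18 --alpha 200 \
#       --method StyleCutMix_Auto_Gamma --r 0.7 --delta 3.0 \
#       --save_dir /write/your/save/dir --data_dir /write/your/data/dir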
best_err1 = 100
best_err5 = 100
def main():
global args, best_err1, best_err5, styleDistanceMatrix, writer
args = parser.parse_args()
writer = SummaryWriter(args.save_dir+'/writer/'+args.method)
if args.method == 'StyleCutMix_Auto_Gamma' :
if args.dataset == 'cifar100':
styleDistanceMatrix = torch.load('styleDistanceMatrix100.pt', map_location='cuda:0')
elif args.dataset == 'cifar10':
styleDistanceMatrix = torch.load('styleDistanceMatrix10.pt', map_location='cuda:0')
else :
raise Exception('unknown dataset: {}'.format(args.dataset))
styleDistanceMatrix = styleDistanceMatrix.cpu()
ind = torch.arange(styleDistanceMatrix.shape[1])
        styleDistanceMatrix[ind, ind] += 2  # offset the diagonal so same-class style distance is never zero
global decoder, vgg, pretrained, network_E, network_D
if args.method.startswith('Style'):
if args.method.startswith('StyleCutMix'):
decoder = net_cutmix.decoder
vgg = net_cutmix.vgg
print("select network StyleCutMix")
network_E = net_cutmix.Net_E(vgg)
network_D = net_cutmix.Net_D(vgg, decoder)
elif args.method == 'StyleMix':
decoder = net_mixup.decoder
vgg = net_mixup.vgg
print("select network StyleMix")
network_E = net_mixup.Net_E(vgg)
network_D = net_mixup.Net_D(vgg, decoder)
else :
raise Exception('unknown method: {}'.format(args.method))
decoder.eval()
vgg.eval()
decoder.load_state_dict(torch.load(args.decoder))
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])
vgg.cuda()
decoder.cuda()
network_E.eval()
network_D.eval()
network_E = torch.nn.DataParallel(network_E).cuda()
network_D = torch.nn.DataParallel(network_D).cuda()
if args.dataset.startswith('cifar'):
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize
])
if args.dataset == 'cifar100':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_dir+'/dataCifar100/', train=True, download=True, transform=transform_train),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(args.data_dir+'/dataCifar100/', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
numberofclass = 100
elif args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_dir+'/dataCifar10/', train=True, download=True, transform=transform_train),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(args.data_dir+'/dataCifar10/', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
numberofclass = 10
else:
raise Exception('unknown dataset: {}'.format(args.dataset))
else:
raise Exception('unknown dataset: {}'.format(args.dataset))
print("=> creating model '{}'".format(args.net_type))
if args.net_type == 'pyramidnet':
model = PYRM.PyramidNet(args.dataset, args.depth, args.alpha, numberofclass,
args.bottleneck)
else:
raise Exception('unknown network architecture: {}'.format(args.net_type))
model = torch.nn.DataParallel(model).cuda()
print(model)
print('the number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=True)
cudnn.benchmark = True
for epoch in range(0, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train_loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
err1, err5, val_loss = validate(val_loader, model, criterion, epoch)
writer.add_scalar('train_loss', train_loss, epoch+1)
writer.add_scalar('val_loss', val_loss, epoch+1)
writer.add_scalar('err1', err1, epoch+1)
writer.add_scalar('err5', err5, epoch+1)
# remember best prec@1 and save checkpoint
is_best = err1 <= best_err1
best_err1 = min(err1, best_err1)
if is_best:
best_err5 = err5
print('Current best accuracy (top-1 and 5 error):', best_err1, best_err5)
save_checkpoint({
'epoch': epoch,
'arch': args.net_type,
'state_dict': model.state_dict(),
'best_err1': best_err1,
'best_err5': best_err5,
'optimizer': optimizer.state_dict(),
}, is_best, args.save_dir, args.dataset)
print('Best accuracy (top-1 and 5 error):', best_err1, best_err5)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
end = time.time()
current_LR = get_learning_rate(optimizer)[0]
print("current_LR : ",current_LR)
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
input = input.cuda()
target = target.cuda()
prob = np.random.rand(1)
if prob < args.prob:
rand_index = torch.randperm(input.size()[0]).cuda()
target_1 = target
target_2 = target[rand_index]
if args.method.startswith('StyleCutMix'):
if args.method == 'StyleCutMix_Auto_Gamma' :
styleDistance = styleDistanceMatrix[target_1, target_2]
gamma = torch.tanh(styleDistance/args.delta)
else :
gamma = np.random.beta(args.alpha2, args.alpha2)
u = nn.Upsample(size=(224, 224), mode='bilinear')
x1 = u(input)
x2 = x1[rand_index]
rs = np.random.beta(args.alpha1, args.alpha1)
M = torch.zeros(1,1,224,224).float()
lam_temp = np.random.beta(args.alpha1, args.alpha1)
bbx1, bby1, bbx2, bby2 = rand_bbox(M.size(), 1.-lam_temp)
with torch.no_grad():
x1_feat = network_E(x1)
mixImage = network_D(x1, x2, x1_feat, x1_feat[rand_index], rs, gamma, bbx1, bby1, bbx2, bby2)
lam = ((bbx2 - bbx1)*(bby2-bby1)/(224.*224.))
uinv = nn.Upsample(size=(32,32), mode='bilinear')
output = model(uinv(mixImage))
log_preds = F.log_softmax(output, dim=-1) # dimension [batch_size, numberofclass]
a_loss = -log_preds[torch.arange(output.shape[0]),target_1] # cross-entropy for A
b_loss = -log_preds[torch.arange(output.shape[0]),target_2] # cross-entropy for B
if args.method == 'StyleCutMix_Auto_Gamma':
gamma = gamma.cuda()
lam_s = gamma * lam + (1.0 - gamma) * rs
loss_c = a_loss * (lam) + b_loss * (1. - lam)
loss_s = a_loss * (lam_s) + b_loss * (1. - lam_s)
r = args.r
loss = (r * loss_c + (1.0 - r) * loss_s).mean()
elif args.method == 'StyleMix':
u = nn.Upsample(size=(224, 224), mode='bilinear')
x1 = u(input)
x2 = x1[rand_index]
rc = np.random.beta(args.alpha1, args.alpha1)
rs = np.random.beta(args.alpha1, args.alpha1)
with torch.no_grad():
x1_feat = network_E(x1)
mixImage = network_D(x1_feat, x1_feat[rand_index], rc, rs)
uinv = nn.Upsample(size=(32,32), mode='bilinear')
output = model(uinv(mixImage))
loss_c = rc * criterion(output, target_1) + (1.0 - rc) * criterion(output, target_2)
loss_s = rs * criterion(output, target_1) + (1.0 - rs) * criterion(output, target_2)
r = args.r
loss = r * loss_c + (1.0-r) * loss_s
else:
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
err1, err5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(err1.item(), input.size(0))
top5.update(err5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and args.verbose == True:
print('Epoch: [{0}/{1}][{2}/{3}]\t'
'LR: {LR:.6f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\t'
'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(
epoch, args.epochs, i, len(train_loader), LR=current_LR, batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
print("Time taken for 1 epoch : ",time.time()-start)
print('* Epoch: [{0}/{1}]\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\t Train Loss {loss.avg:.3f}'.format(
epoch, args.epochs, top1=top1, top5=top5, loss=losses))
return losses.avg
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
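# Note (added, illustrative): because the box above is clipped to the image borders, the
# effective mixing ratio is recomputed from the box area rather than reused from lam,
# mirroring the computation in train() above, e.g.
#   bbx1, bby1, bbx2, bby2 = rand_bbox((1, 3, 224, 224), 1. - 0.3)
#   lam = (bbx2 - bbx1) * (bby2 - bby1) / (224. * 224.)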
def validate(val_loader, model, criterion, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
        target = target.cuda()
        with torch.no_grad():
            output = model(input)
            loss = criterion(output, target)
# measure accuracy and record loss
err1, err5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(err1.item(), input.size(0))
top5.update(err5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and args.verbose == True:
print('Test (on val set): [{0}/{1}][{2}/{3}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top 1-err {top1.val:.4f} ({top1.avg:.4f})\t'
'Top 5-err {top5.val:.4f} ({top5.avg:.4f})'.format(
epoch, args.epochs, i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print('* Epoch: [{0}/{1}]\t Top 1-err {top1.avg:.3f} Top 5-err {top5.avg:.3f}\t Test Loss {loss.avg:.3f}'.format(
epoch, args.epochs, top1=top1, top5=top5, loss=losses))
return top1.avg, top5.avg, losses.avg
def save_checkpoint(state, is_best, save_dir, dataset, filename='checkpoint.pth.tar'):
directory = save_dir+"/model/"+dataset+"/"+str(args.method)+"/%s/" % (args.expname)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, save_dir+"/model/"+dataset+"/"+str(args.method)+'/%s/' % (args.expname) + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
        self.count = 0
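    def update(self, val, n=1):
        # Completion sketch (assumed; the original file is truncated here). This is the
        # conventional AverageMeter update used by the train()/validate() calls above.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count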
# File: covid19_stats/engine/viz.py
import os, sys, numpy, titlecase, time, pandas, zipfile, mutagen.mp4
import subprocess, tempfile, shutil, datetime, logging, copy
import pathos.multiprocessing as multiprocessing
from itertools import chain
from multiprocessing import Value, Manager
import cartopy.feature as cfeature
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from matplotlib.patches import Polygon
from matplotlib.cm import ScalarMappable
from matplotlib.colors import LogNorm, Normalize, to_rgba
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from mpl_toolkits.axes_grid1 import make_axes_locatable
from distutils.spawn import find_executable
from nprstuff.core import autocrop_image
#
from covid19_stats.engine import gis, core, get_string_commas_num, find_plausible_maxnum
def my_colorbar( mappable, ax, **kwargs ):
"""
secret saucing (explanation is incomprehensible) from https://joseph-long.com/writing/colorbars. I do not understand how it works the way it does, but it does! I shamelessly copy the method description from the :py:meth:`colorbar method <matplotlib.pyplot.colorbar>`. I have also updated this thing to `this website <https://stackoverflow.com/questions/30030328/correct-placement-of-colorbar-relative-to-geo-axes-cartopy>`_ that now works on :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
:param mappable: a :py:class:`ScalarMappable <matplotlib.cm.ScalarMappable>` described by this colorbar.
:param ax: the parent :py:class:`Axes <matplotlib.axes.Axes>` from whose space a new colorbar axes will be stolen.
:returns: the underlying :py:class:`Colorbar <matplotlib.colorbar.Colorbar>`.
:rtype: :py:class:`Colorbar <matplotlib.colorbar.Colorbar>`
"""
fig = ax.figure
divider = make_axes_locatable( ax )
cax = divider.append_axes("right", size="5%", pad=0.05, axes_class = Axes )
cbar = fig.colorbar(mappable, cax=cax, **kwargs)
return cbar
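def _demo_my_colorbar( ):
    """
    Illustrative usage sketch (added; not part of the original module). The normalization
    range and colormap are arbitrary assumptions.
    """
    fig = Figure( figsize = ( 6, 4 ) )
    FigureCanvasAgg( fig )
    ax = fig.add_subplot( 1, 1, 1 )
    mappable = ScalarMappable( norm = Normalize( 0, 100 ), cmap = 'viridis' )
    mappable.set_array( [ ] )
    return my_colorbar( mappable, ax, label = 'value' )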
def create_and_draw_fromfig(
fig, bbox, river_linewidth = 5, river_alpha = 0.3,
coast_linewidth = 2, coast_alpha = 0.4, drawGrid = True, mult_bounds_lat = 1.05,
mult_bounds_lng = 1.05, rows = 1, cols = 1, num = 1 ):
"""
This creates an :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`, with lots of physical geographic features, and optional (but turned on by default) latitude and longitude gridding, of a region specified by a bounding box. This uses `stereographic projection`_. For example, here is the :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` displaying the CONUS_.
.. _viz_create_and_draw_fromfig_conus:
.. figure:: /_static/viz/viz_create_and_draw_fromfig_conus.png
:width: 100%
:align: left
Demonstrations of this functionality, which underlies (or overlays?) the geographical features for visualizing COVID-19 cases and deaths.
Here are the arguments.
:param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to create a :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` containing geographic features. Last three arguments -- ``rows``, ``cols``, and ``num`` -- describe the relative placement of the created :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`. See :py:meth:`add_subplot <matplotlib.figure.Figure.add_subplot>` for those three arguments' meanings.
:param tuple bbox: a four-element :py:class:`tuple`. Elements in order are *minimum* longitude, *minimum* latitude, *maximum* longitude, and *maximum* latitude.
:param int river_linewidth: the width, in pixels, of river geographical features.
:param float river_alpha: the color alpha of river geographical features.
:param int coast_linewidth: the width, in pixels, of the coast lines.
:param float coast_alpha: the color alpha of coast lines.
:param bool drawGrid: if ``True``, then overlay the latitude and longitude grid lines. Otherwise do not. Default is ``True``.
:param float mult_bounds_lat: often times, especially with geographic regions that cover a significant area of the earth, we need to put a multiplier :math:`> 1` on the *latitudinal* extent of the plot, so that *all* features can be seen. By default this value is 1.05, but it must be :math:`\ge 1`.
:param float mult_bounds_lng: often times, especially with geographic regions that cover a significant area of the earth, we need to put a multiplier :math:`> 1` on the *longitudinal* extent of the plot, so that *all* features can be seen. By default this value is 1.05, but it must be :math:`\ge 1`.
:param int rows: the number of rows for axes in the :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1`, and by default is 1.
:param int cols: the number of columns for axes in the :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1`, and by default is 1.
:param int num: the plot number of the :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>` in this :py:class:`Figure <matplotlib.figure.Figure>` grid. Must be :math:`\ge 1` and :math:`\le`\ ``rows`` times ``columns``. Its default is 1. Look at :py:meth:`add_subplot <matplotlib.figure.Figure.add_subplot>` for its meaning.
:rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
.. _`stereographic projection`: https://en.wikipedia.org/wiki/Stereographic_projection
"""
assert( mult_bounds_lat >= 1.0 )
assert( mult_bounds_lng >= 1.0 )
assert( rows >= 1 )
assert( cols >= 1 )
assert( num >= 1 )
assert( num <= rows * cols )
#
min_lng, min_lat, max_lng, max_lat = bbox
lng_center = 0.5 * ( min_lng + max_lng )
lat_center = 0.5 * ( min_lat + max_lat )
lng_delta = mult_bounds_lng * ( max_lng - min_lng ) * 0.5
lat_delta = mult_bounds_lat * ( max_lat - min_lat ) * 0.5
#
ax = fig.add_subplot(
rows, cols, num, projection = ccrs.Stereographic(
central_latitude = lat_center, central_longitude = lng_center ) )
#
## now set the extent
ax.set_extent( (
lng_center - lng_delta, lng_center + lng_delta,
lat_center - lat_delta, lat_center + lat_delta ) )
#
## draw the grid lines if selected
if drawGrid: ax.gridlines( draw_labels = True )
#
## coastlines, linewidth = coast_linewidth, alpha = coast_alpha, is black
ax.coastlines(
linewidth = coast_linewidth,
color = numpy.array([ 0.0, 0.0, 0.0, coast_alpha ]) )
#
## rivers with linewidth = river_linewidth, alpha = river_alpha
riverf = cfeature.NaturalEarthFeature(
'physical', 'rivers_lake_centerlines', cfeature.auto_scaler,
edgecolor = numpy.concatenate([ cfeature.COLORS['water'], [ river_alpha, ] ] ),
facecolor='never', linewidth = river_linewidth )
ax.add_feature( riverf )
#
## lakes with alpha = river_alpha
lakef = cfeature.NaturalEarthFeature(
'physical', 'lakes', cfeature.auto_scaler,
edgecolor = 'face',
facecolor = numpy.concatenate([ cfeature.COLORS['water'], [ river_alpha, ] ]) )
ax.add_feature( lakef )
return ax
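def _demo_create_and_draw_fromfig( ):
    """
    Illustrative usage sketch (added; not part of the original module). The bounding box
    below is an assumed region roughly covering the NYC metro area.
    """
    fig = Figure( figsize = ( 8, 8 ) )
    FigureCanvasAgg( fig )
    bbox = ( -75.4, 39.9, -71.8, 41.6 )  # ( min_lng, min_lat, max_lng, max_lat )
    return create_and_draw_fromfig( fig, bbox )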
def display_fips_geom( fips_data, fig, **kwargs ):
"""
Demonstrative plot, returning a :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`, of a FIPS data collection. For example, for the NYC Metro Area, this is,
.. _viz_display_fips_geom_nyc:
.. figure:: /_static/viz/viz_display_fips_geom_nyc.png
:width: 100%
:align: left
Demonstration of this method showing the counties in the NYC Metro Area. One can extract the patches in this object to manually change the colors of these county polygons.
Here are the arguments.
:param dict fips_data: the :py:class:`dict` of FIPS geographic data. This has keys of ``prefix``, ``fips``, and ``population``. Look at :ref:`the St. Louis FIPS region data <stlouis_msa_example_data>` for its structure.
:param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
:rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
"""
bdict = core.get_boundary_dict( fips_data[ 'fips' ] )
bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
ax = create_and_draw_fromfig( fig, bbox, **kwargs )
fc = list( to_rgba( '#1f77b4' ) )
fc[-1] = 0.25
for fips in bdict:
for shape in bdict[ fips ]:
poly = Polygon(
shape, closed = True,
edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
facecolor = tuple(fc), alpha = 1.0, transform = ccrs.PlateCarree( ) )
ax.add_patch( poly )
return ax
def display_fips( collection_of_fips, fig, **kwargs ):
"""
Method that is very similar to :py:meth:`display_fips_geom <covid19_stats.engine.viz.display_fips_geom>`, except this *also* displays the FIPS code of each county. For example, for `Rhode Island`_, this is.
.. _viz_display_fips_rhodeisland:
.. figure:: /_static/viz/viz_display_fips_rhodeisland.png
:width: 100%
:align: left
Demonstration of this method showing the counties in `Rhode Island`_. The FIPS code of each county is shown in red. One can extract the patches in this object to manually change the colors of these county polygons.
Here are the arguments.
:param collection_of_fips: can be a :py:class:`list`, :py:class:`set`, or other iterable of FIPS codes to visualize and label.
:param fig: the :py:class:`Figure <matplotlib.figure.Figure>` onto which to draw this :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`.
:rtype: :py:class:`GeoAxes <cartopy.mpl.geoaxes.GeoAxes>`
.. _`Rhode Island`: https://en.wikipedia.org/wiki/Rhode_Island
"""
bdict = core.get_boundary_dict( collection_of_fips )
bbox = gis.calculate_total_bbox( chain.from_iterable( bdict.values( ) ) )
ax = create_and_draw_fromfig( fig, bbox, **kwargs )
fc = list( to_rgba( '#1f77b4' ) )
fc[-1] = 0.25
for fips in sorted( bdict ):
for shape in bdict[ fips ]:
poly = Polygon(
shape, closed = True,
edgecolor = 'k', linewidth = 2.0, linestyle = 'dashed',
facecolor = tuple( fc ), alpha = 1.0, transform = ccrs.PlateCarree( ) )
ax.add_patch( poly )
lng_cent = shape[:,0].mean( )
lat_cent = shape[:,1].mean( )
ax.text(
lng_cent, lat_cent, fips, fontsize = 10, fontweight = 'bold', color = 'red',
transform = ccrs.PlateCarree( ) )
return ax
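def _demo_display_fips( ):
    """
    Illustrative usage sketch (added; not part of the original module). The FIPS codes
    below are assumed example county codes, used purely for demonstration.
    """
    fig = Figure( figsize = ( 8, 8 ) )
    FigureCanvasAgg( fig )
    return display_fips( [ '44001', '44003', '44005', '44007', '44009' ], fig )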
def display_msa( msaname, fig, doShow = False, **kwargs ):
"""
Convenience method that visualizes and labels, by FIPS code, the counties in a `Metropolitan Statistical Area <msa_>`_. It can optionally save the output to a file, ``msa_<msaname>_counties.png``. Here is an example of the NYC Metro Area.
.. _viz_display_msa_nyc:
.. figure:: /_static/viz/viz_display_msa_nyc.png
| |
False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sfp.Term, self).__init__()
self.yang_name = "term"
self.yang_parent_name = "sfp"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
])
self.terminated_pkts = None
self.terminated_bytes = None
self._segment_path = lambda: "term"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sfp.Term, ['terminated_pkts', 'terminated_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sfp.Term']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sfp']['meta_info']
class SpiSi(_Entity_):
"""
SPI SI stats
.. attribute:: processed_pkts
Number of packets processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: processed_bytes
Total bytes processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SpiSi, self).__init__()
self.yang_name = "spi-si"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "spi-si"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SpiSi, ['processed_pkts', 'processed_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SpiSi']['meta_info']
class Term(_Entity_):
"""
Terminate stats
.. attribute:: terminated_pkts
Number of terminated packets
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: terminated_bytes
Total bytes terminated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Term, self).__init__()
self.yang_name = "term"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
])
self.terminated_pkts = None
self.terminated_bytes = None
self._segment_path = lambda: "term"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Term, ['terminated_pkts', 'terminated_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Term']['meta_info']
class Sf(_Entity_):
"""
Service function stats
.. attribute:: processed_pkts
Number of packets processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: processed_bytes
Total bytes processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sf, self).__init__()
self.yang_name = "sf"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "sf"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sf, ['processed_pkts', 'processed_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sf']['meta_info']
class Sff(_Entity_):
"""
Service function forwarder stats
.. attribute:: processed_pkts
Number of packets processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: processed_bytes
Total bytes processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sff, self).__init__()
self.yang_name = "sff"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "sff"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sff, ['processed_pkts', 'processed_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.Sff']['meta_info']
class SffLocal(_Entity_):
"""
Local service function forwarder stats
.. attribute:: malformed_err_pkts
Number of packets with invalid NSH header
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: lookup_err_pkts
Number of packets with unknown spi\-si
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: malformed_err_bytes
Total bytes with invalid NSH header
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
.. attribute:: lookup_err_bytes
Total bytes with unknown spi\-si
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SffLocal, self).__init__()
self.yang_name = "sff-local"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('malformed_err_pkts', (YLeaf(YType.uint64, 'malformed-err-pkts'), ['int'])),
('lookup_err_pkts', (YLeaf(YType.uint64, 'lookup-err-pkts'), ['int'])),
('malformed_err_bytes', (YLeaf(YType.uint64, 'malformed-err-bytes'), ['int'])),
('lookup_err_bytes', (YLeaf(YType.uint64, 'lookup-err-bytes'), ['int'])),
])
self.malformed_err_pkts = None
self.lookup_err_pkts = None
self.malformed_err_bytes = None
self.lookup_err_bytes = None
self._segment_path = lambda: "sff-local"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SffLocal, ['malformed_err_pkts', 'lookup_err_pkts', 'malformed_err_bytes', 'lookup_err_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data.SffLocal']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.Data']['meta_info']
class SiArr(_Entity_):
"""
SI array in case of detail stats
.. attribute:: data
Stats counter for this index
**type**\: :py:class:`Data <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data>`
**config**\: False
.. attribute:: si
Service index
**type**\: int
**range:** 0..255
**config**\: False
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr, self).__init__()
self.yang_name = "si-arr"
self.yang_parent_name = "sf-name"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("data", ("data", GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data))])
self._leafs = OrderedDict([
('si', (YLeaf(YType.uint8, 'si'), ['int'])),
])
self.si = None
self.data = GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data()
self.data.parent = self
self._children_name_map["data"] = "data"
self._segment_path = lambda: "si-arr"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr, ['si'], name, value)
class Data(_Entity_):
"""
Stats counter for this index
.. attribute:: spi_si
SF/SFF stats
**type**\: :py:class:`SpiSi <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi>`
**config**\: False
.. attribute:: term
Terminate stats
**type**\: :py:class:`Term <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term>`
**config**\: False
.. attribute:: type
type
**type**\: :py:class:`VsNshStats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper.VsNshStats>`
**config**\: False
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data, self).__init__()
self.yang_name = "data"
self.yang_parent_name = "si-arr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("spi-si", ("spi_si", GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi)), ("term", ("term", GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term))])
self._leafs = OrderedDict([
('type', (YLeaf(YType.enumeration, 'type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_pbr_vservice_mgr_oper', 'VsNshStats', '')])),
])
self.type = None
self.spi_si = GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi()
self.spi_si.parent = self
self._children_name_map["spi_si"] = "spi-si"
self.term = GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term()
self.term.parent = self
self._children_name_map["term"] = "term"
self._segment_path = lambda: "data"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data, ['type'], name, value)
class SpiSi(_Entity_):
"""
SF/SFF stats
.. attribute:: processed_pkts
Number of packets processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: processed_bytes
Total bytes processed
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi, self).__init__()
self.yang_name = "spi-si"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('processed_pkts', (YLeaf(YType.uint64, 'processed-pkts'), ['int'])),
('processed_bytes', (YLeaf(YType.uint64, 'processed-bytes'), ['int'])),
])
self.processed_pkts = None
self.processed_bytes = None
self._segment_path = lambda: "spi-si"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi, ['processed_pkts', 'processed_bytes'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.SpiSi']['meta_info']
class Term(_Entity_):
"""
Terminate stats
.. attribute:: terminated_pkts
Number of terminated packets
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
.. attribute:: terminated_bytes
Total bytes terminated
**type**\: int
**range:** 0..18446744073709551615
**config**\: False
**units**\: byte
"""
_prefix = 'pbr-vservice-mgr-oper'
_revision = '2017-05-01'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term, self).__init__()
self.yang_name = "term"
self.yang_parent_name = "data"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('terminated_pkts', (YLeaf(YType.uint64, 'terminated-pkts'), ['int'])),
('terminated_bytes', (YLeaf(YType.uint64, 'terminated-bytes'), ['int'])),
])
self.terminated_pkts = None
self.terminated_bytes = None
self._segment_path = lambda: "term"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term, ['terminated_pkts', 'terminated_bytes'], name, value)
@staticmethod
def _meta_info():
                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_pbr_vservice_mgr_oper as meta
                                return meta._meta_table['GlobalServiceFunctionChaining.ServiceFunction.SfNames.SfName.SiArr.Data.Term']['meta_info']
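def _example_read_sfc_stats():
    """
    Illustrative sketch (added; not part of the generated bindings): read these operational
    counters from a device with ydk's CRUDService. The device address, credentials, and the
    snake_case attribute path are assumptions based on the class hierarchy above.
    """
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address='192.0.2.1', username='admin', password='admin')
    crud = CRUDService()
    sfc = crud.read(provider, GlobalServiceFunctionChaining())
    for sf_name in sfc.service_function.sf_names.sf_name:
        print(sf_name.name, sf_name.data.sf.processed_pkts, sf_name.data.sf.processed_bytes)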