| body (stringlengths 26-98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1-16.8k) | path (stringlengths 5-230) | name (stringlengths 1-96) | repository_name (stringlengths 7-89) | lang (stringclasses 1: python) | body_without_docstring (stringlengths 20-98.2k) |
---|---|---|---|---|---|---|---|
def locate():
'Locate the image file/index if decode fails.'
if (self.seq is not None):
idx = self.seq[((self.cur % self.num_image) - 1)]
else:
idx = ((self.cur % self.num_image) - 1)
if (self.imglist is not None):
(_, fname) = self.imglist[idx]
msg = 'filename: {}'.format(fname)
else:
msg = 'index: {}'.format(idx)
return ('Broken image ' + msg) | 6,663,479,313,187,464,000 | Locate the image file/index if decode fails. | python/mxnet/image/image.py | locate | Vikas89/private-mxnet | python | def locate():
if (self.seq is not None):
idx = self.seq[((self.cur % self.num_image) - 1)]
else:
idx = ((self.cur % self.num_image) - 1)
if (self.imglist is not None):
(_, fname) = self.imglist[idx]
msg = 'filename: {}'.format(fname)
else:
msg = 'index: {}'.format(idx)
return ('Broken image ' + msg) |
def testV1beta1CPUTargetUtilization(self):
'\n Test V1beta1CPUTargetUtilization\n '
model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization() | -8,290,292,325,380,832,000 | Test V1beta1CPUTargetUtilization | openshift/test/test_v1beta1_cpu_target_utilization.py | testV1beta1CPUTargetUtilization | flaper87/openshift-restclient-python | python | def testV1beta1CPUTargetUtilization(self):
'\n \n '
model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization() |
def gen_base_anchors(self):
'Generate base anchors.\n\n Returns:\n list(torch.Tensor): Base anchors of a feature grid in multiple feature levels.\n '
multi_level_base_anchors = []
for (i, base_size) in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors | -7,490,404,798,596,648,000 | Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple feature levels. | xmmdet/core/anchor/anchor_generator.py | gen_base_anchors | www516717402/edgeai-mmdetection | python | def gen_base_anchors(self):
'Generate base anchors.\n\n Returns:\n list(torch.Tensor): Base anchors of a feature grid in multiple feature levels.\n '
multi_level_base_anchors = []
for (i, base_size) in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors |
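The `indices` juggling above is easy to misread. A toy sketch (anchor values assumed) of what the constructed index list does to the anchor rows:

```python
# With 3 ratios, `indices` becomes [0, 3, 1, 2]: the anchor generated for
# the extra scale (row 3) is moved up to position 1. In the real generator,
# any rows not listed in `indices` are dropped by index_select.
import torch

base_anchors = torch.arange(8.).reshape(4, 2)   # 4 toy anchors of (w, h)
indices = list(range(3))                        # [0, 1, 2]
indices.insert(1, len(indices))                 # [0, 3, 1, 2]
print(torch.index_select(base_anchors, 0, torch.LongTensor(indices)))
# rows of base_anchors in the order 0, 3, 1, 2
```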
def __repr__(self):
'str: a string that describes the module'
indent_str = ' '
repr_str = (self.__class__.__name__ + '(\n')
repr_str += f'''{indent_str}strides={self.strides},
'''
repr_str += f'''{indent_str}scales={self.scales},
'''
repr_str += f'''{indent_str}scale_major={self.scale_major},
'''
repr_str += f'''{indent_str}input_size={self.input_size},
'''
repr_str += f'''{indent_str}scales={self.scales},
'''
repr_str += f'''{indent_str}ratios={self.ratios},
'''
repr_str += f'''{indent_str}num_levels={self.num_levels},
'''
repr_str += f'''{indent_str}base_sizes={self.base_sizes},
'''
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str | 1,996,265,108,211,176,700 | str: a string that describes the module | xmmdet/core/anchor/anchor_generator.py | __repr__ | www516717402/edgeai-mmdetection | python | def __repr__(self):
indent_str = ' '
repr_str = (self.__class__.__name__ + '(\n')
repr_str += f'''{indent_str}strides={self.strides},
'''
repr_str += f'''{indent_str}scales={self.scales},
'''
repr_str += f'''{indent_str}scale_major={self.scale_major},
'''
repr_str += f'''{indent_str}input_size={self.input_size},
'''
repr_str += f'''{indent_str}scales={self.scales},
'''
repr_str += f'''{indent_str}ratios={self.ratios},
'''
repr_str += f'''{indent_str}num_levels={self.num_levels},
'''
repr_str += f'''{indent_str}base_sizes={self.base_sizes},
'''
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str |
def __init__(self, simulator):
'Instantiate a Display (that is: a window with a display widget and\n simulation controls) from a simulator.\n\n :param simulator: The simulator to use.'
self.simulator = simulator
if (not self.display):
if ('tiles' in self.simulator.palette_info):
self.display = NewDisplayWidget(self.simulator)
else:
self.display = DisplayWidget(self.simulator)
if (self.control is None):
self.control = ControlWidget(self.simulator)
from .mainwin import ZasimMainWindow
self.window = ZasimMainWindow(self.simulator, self.display, self.control)
display_objects.append(self.window)
self.window.show() | 1,797,124,596,122,620,200 | Instantiate a Display (that is: a window with a display widget and
simulation controls) from a simulator.
:param simulator: The simulator to use. | zasim/gui/display.py | __init__ | timo/zasim | python | def __init__(self, simulator):
'Instantiate a Display (that is: a window with a display widget and\n simulation controls) from a simulator.\n\n :param simulator: The simulator to use.'
self.simulator = simulator
if (not self.display):
if ('tiles' in self.simulator.palette_info):
self.display = NewDisplayWidget(self.simulator)
else:
self.display = DisplayWidget(self.simulator)
if (self.control is None):
self.control = ControlWidget(self.simulator)
from .mainwin import ZasimMainWindow
self.window = ZasimMainWindow(self.simulator, self.display, self.control)
display_objects.append(self.window)
self.window.show() |
def set_scale(self, scale):
'Sets the scale of the display component.'
self.display.set_scale(scale) | -6,195,129,771,046,029,000 | Sets the scale of the display component. | zasim/gui/display.py | set_scale | timo/zasim | python | def set_scale(self, scale):
self.display.set_scale(scale) |
def resize_images_3d(x, output_shape):
'Resize images to the given shape.\n This function resizes 3D data to :obj:`output_shape`.\n Currently, only bilinear interpolation is supported as the sampling method.\n Notation: here is a notation for dimensionalities.\n - :math:`n` is the batch size.\n - :math:`c_I` is the number of the input channels.\n - :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the\n input image, respectively.\n - :math:`h_O`, :math:`w_O` and :math:`d_O` are the height, width and depth\n of the output image.\n Args:\n x (~chainer.Variable):\n Input variable of shape :math:`(n, c_I, h, w, d)`.\n output_shape (tuple):\n This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.\n Returns:\n ~chainer.Variable: Resized image whose shape is :math:`(n, c_I, h_O, w_O, d_O)`.\n '
return ResizeImages3D(output_shape).apply((x,))[0] | -1,313,251,346,635,617,300 | Resize images to the given shape.
This function resizes 3D data to :obj:`output_shape`.
Currently, only bilinear interpolation is supported as the sampling method.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input channels.
- :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the
input image, respectively.
- :math:`h_O`, :math:`w_O` and :math:`d_O` are the height, width and depth
of the output image.
Args:
x (~chainer.Variable):
Input variable of shape :math:`(n, c_I, h, w, d)`.
output_shape (tuple):
This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.
Returns:
~chainer.Variable: Resized image whose shape is :math:`(n, c_I, h_O, w_O, d_O)`. | src/links/model/resize_images_3d.py | resize_images_3d | pfnet-research/label-efficient-brain-tumor-segmentation | python | def resize_images_3d(x, output_shape):
'Resize images to the given shape.\n This function resizes 3D data to :obj:`output_shape`.\n Currently, only bilinear interpolation is supported as the sampling method.\n Notation: here is a notation for dimensionalities.\n - :math:`n` is the batch size.\n - :math:`c_I` is the number of the input channels.\n - :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the\n input image, respectively.\n - :math:`h_O`, :math:`w_O` and :math:`d_O` are the height, width and depth\n of the output image.\n Args:\n x (~chainer.Variable):\n Input variable of shape :math:`(n, c_I, h, w, d)`.\n output_shape (tuple):\n This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.\n Returns:\n ~chainer.Variable: Resized image whose shape is :math:`(n, c_I, h_O, w_O, d_O)`.\n '
return ResizeImages3D(output_shape).apply((x,))[0] |
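A hedged usage sketch for `resize_images_3d`, assuming the module above is importable (it depends on the `ResizeImages3D` function node defined in the same file):

```python
import numpy as np
import chainer

# One batch, two channels, a 16^3 volume upsampled to 32^3 bilinearly.
x = chainer.Variable(np.zeros((1, 2, 16, 16, 16), dtype=np.float32))
y = resize_images_3d(x, (32, 32, 32))
print(y.shape)  # (1, 2, 32, 32, 32)
```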
def server_extensions_handshake(requested: List[str], supported: List[Extension]) -> Optional[bytes]:
'Agree on the extensions to use, returning an appropriate header value.\n\n This returns None if there are no agreed extensions.\n '
accepts: Dict[(str, Union[(bool, bytes)])] = {}
for offer in requested:
name = offer.split(';', 1)[0].strip()
for extension in supported:
if (extension.name == name):
accept = extension.accept(offer)
if (accept is True):
accepts[extension.name] = True
elif ((accept is not False) and (accept is not None)):
accepts[extension.name] = accept.encode('ascii')
if accepts:
extensions: List[bytes] = []
for (name, params) in accepts.items():
name = name.encode('ascii')
if (params is True):
extensions.append(name)
elif (params == b''):
extensions.append((b'%s' % name))
else:
extensions.append((b'%s; %s' % (name, params)))
return b', '.join(extensions)
return None | -5,065,775,016,215,352,000 | Agree on the extensions to use, returning an appropriate header value.
This returns None if there are no agreed extensions. | wsproto/handshake.py | server_extensions_handshake | bluetech/wsproto | python | def server_extensions_handshake(requested: List[str], supported: List[Extension]) -> Optional[bytes]:
'Agree on the extensions to use, returning an appropriate header value.\n\n This returns None if there are no agreed extensions.\n '
accepts: Dict[(str, Union[(bool, bytes)])] = {}
for offer in requested:
name = offer.split(';', 1)[0].strip()
for extension in supported:
if (extension.name == name):
accept = extension.accept(offer)
if (accept is True):
accepts[extension.name] = True
elif ((accept is not False) and (accept is not None)):
accepts[extension.name] = accept.encode('ascii')
if accepts:
extensions: List[bytes] = []
for (name, params) in accepts.items():
name = name.encode('ascii')
if (params is True):
extensions.append(name)
elif (params == b''):
extensions.append((b'%s' % name))
else:
extensions.append((b'%s; %s' % (name, params)))
return b', '.join(extensions)
return None |
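A short usage sketch; `PerMessageDeflate` is wsproto's bundled extension, and the offer string is what a client would send in its `Sec-WebSocket-Extensions` header:

```python
from wsproto.extensions import PerMessageDeflate

requested = ['permessage-deflate; client_max_window_bits']
supported = [PerMessageDeflate()]
header_value = server_extensions_handshake(requested, supported)
# `header_value` is a bytes value for the response's
# Sec-WebSocket-Extensions header, or None if nothing was agreed.
```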
@property
def connection(self) -> Optional[Connection]:
'Return the established connection.\n\n This will either return the connection or raise a\n LocalProtocolError if the connection has not yet been\n established.\n\n :rtype: h11.Connection\n '
return self._connection | -79,934,593,187,587,360 | Return the established connection.
This will either return the connection or raise a
LocalProtocolError if the connection has not yet been
established.
:rtype: h11.Connection | wsproto/handshake.py | connection | bluetech/wsproto | python | @property
def connection(self) -> Optional[Connection]:
'Return the established connection.\n\n This will either return the connection or raise a\n LocalProtocolError if the connection has not yet been\n established.\n\n :rtype: h11.Connection\n '
return self._connection |
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
'Initiate an upgrade connection.\n\n This should be used if the request has already been received and\n parsed.\n\n :param list headers: HTTP headers represented as a list of 2-tuples.\n :param str path: A URL path.\n '
if self.client:
raise LocalProtocolError('Cannot initiate an upgrade connection when acting as the client')
upgrade_request = h11.Request(method=b'GET', target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request)) | -7,749,412,552,793,166,000 | Initiate an upgrade connection.
This should be used if the request has already been received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path. | wsproto/handshake.py | initiate_upgrade_connection | bluetech/wsproto | python | def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
'Initiate an upgrade connection.\n\n This should be used if the request has already been received and\n parsed.\n\n :param list headers: HTTP headers represented as a list of 2-tuples.\n :param str path: A URL path.\n '
if self.client:
raise LocalProtocolError('Cannot initiate an upgrade connection when acting as the client')
upgrade_request = h11.Request(method=b'GET', target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request)) |
def send(self, event: Event) -> bytes:
'Send an event to the remote.\n\n This will return the bytes to send based on the event or raise\n a LocalProtocolError if the event is not valid given the\n state.\n\n :returns: Data to send to the WebSocket peer.\n :rtype: bytes\n '
data = b''
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError('Event {} cannot be sent during the handshake'.format(event))
return data | -7,001,928,548,610,590,000 | Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes | wsproto/handshake.py | send | bluetech/wsproto | python | def send(self, event: Event) -> bytes:
'Send an event to the remote.\n\n This will return the bytes to send based on the event or raise\n a LocalProtocolError if the event is not valid given the\n state.\n\n :returns: Data to send to the WebSocket peer.\n :rtype: bytes\n '
data = b''
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError('Event {} cannot be sent during the handshake'.format(event))
return data |
def receive_data(self, data: bytes) -> None:
'Receive data from the remote.\n\n A list of events that the remote peer triggered by sending\n this data can be retrieved with :meth:`events`.\n\n :param bytes data: Data received from the WebSocket peer.\n '
self._h11_connection.receive_data(data)
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError('Bad HTTP message', event_hint=RejectConnection())
if (isinstance(event, h11.ConnectionClosed) or (event is h11.NEED_DATA) or (event is h11.PAUSED)):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if (event.status_code == 101):
self._events.append(self._establish_client_connection(event))
else:
self._events.append(RejectConnection(headers=event.headers, status_code=event.status_code, has_body=False))
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(RejectConnection(headers=event.headers, status_code=event.status_code, has_body=True))
elif isinstance(event, h11.Data):
self._events.append(RejectData(data=event.data, body_finished=False))
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b'', body_finished=True))
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event)) | 8,708,793,808,222,445,000 | Receive data from the remote.
A list of events that the remote peer triggered by sending
this data can be retrieved with :meth:`events`.
:param bytes data: Data received from the WebSocket peer. | wsproto/handshake.py | receive_data | bluetech/wsproto | python | def receive_data(self, data: bytes) -> None:
'Receive data from the remote.\n\n A list of events that the remote peer triggered by sending\n this data can be retrieved with :meth:`events`.\n\n :param bytes data: Data received from the WebSocket peer.\n '
self._h11_connection.receive_data(data)
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError('Bad HTTP message', event_hint=RejectConnection())
if (isinstance(event, h11.ConnectionClosed) or (event is h11.NEED_DATA) or (event is h11.PAUSED)):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if (event.status_code == 101):
self._events.append(self._establish_client_connection(event))
else:
self._events.append(RejectConnection(headers=event.headers, status_code=event.status_code, has_body=False))
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(RejectConnection(headers=event.headers, status_code=event.status_code, has_body=True))
elif isinstance(event, h11.Data):
self._events.append(RejectData(data=event.data, body_finished=False))
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b'', body_finished=True))
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event)) |
def events(self) -> Generator[(Event, None, None)]:
'Return a generator that provides any events that have been generated\n by protocol activity.\n\n :returns: a generator that yields H11 events.\n '
while self._events:
(yield self._events.popleft()) | -4,374,321,370,451,939,300 | Return a generator that provides any events that have been generated
by protocol activity.
:returns: a generator that yields H11 events. | wsproto/handshake.py | events | bluetech/wsproto | python | def events(self) -> Generator[(Event, None, None)]:
'Return a generator that provides any events that have been generated\n by protocol activity.\n\n :returns: a generator that yields H11 events.\n '
while self._events:
(yield self._events.popleft()) |
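Taken together, `receive_data`, `events` and `send` form the usual handshake loop. A minimal server-side sketch, assuming these methods live on wsproto's `H11Handshake` and that `sock` is an already-accepted TCP socket:

```python
from wsproto.connection import ConnectionType
from wsproto.events import AcceptConnection, Request
from wsproto.handshake import H11Handshake

handshake = H11Handshake(ConnectionType.SERVER)
handshake.receive_data(sock.recv(4096))   # `sock` is assumed to exist
for event in handshake.events():
    if isinstance(event, Request):
        # Accept the upgrade; send() returns the 101 response bytes.
        sock.send(handshake.send(AcceptConnection()))
```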
def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):
'\n Set instance variables.\n '
self.plugin = None
self.network = None
self.exec_network = None
self.infer_request_handle = None
self.input_blob = None
self.input_shape = None
self.output_blob = None
self.output_shape = None
self.model_name = model_name
self.device = device
self.extensions = extensions
self.async_infer = async_infer | 4,609,530,129,871,315,000 | Set instance variables. | src/facial_landmarks_detection.py | __init__ | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):
'\n \n '
self.plugin = None
self.network = None
self.exec_network = None
self.infer_request_handle = None
self.input_blob = None
self.input_shape = None
self.output_blob = None
self.output_shape = None
self.model_name = model_name
self.device = device
self.extensions = extensions
self.async_infer = async_infer |
def load_model(self, plugin):
'\n This method is for loading the model (in IR format) to the device specified by the user.\n Default device is CPU.\n '
model_structure = (self.model_name + '.xml')
model_weights = (self.model_name + '.bin')
self.plugin = plugin
if (self.extensions and ('CPU' in self.device)):
self.plugin.add_extension(self.extensions, self.device)
try:
self.network = self.plugin.read_network(model=model_structure, weights=model_weights)
except:
raise ValueError('Could not initialise the network. Have you entered the correct model path?')
if (self.device == 'CPU'):
self.check_model()
self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)
self.input_blob = next(iter(self.network.inputs))
self.input_shape = self.network.inputs[self.input_blob].shape
self.output_blob = next(iter(self.network.outputs))
self.output_shape = self.network.outputs[self.output_blob].shape
return | 6,192,489,219,267,985,000 | This method is for loading the model (in IR format) to the device specified by the user.
Default device is CPU. | src/facial_landmarks_detection.py | load_model | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def load_model(self, plugin):
'\n This method is for loading the model (in IR format) to the device specified by the user.\n Default device is CPU.\n '
model_structure = (self.model_name + '.xml')
model_weights = (self.model_name + '.bin')
self.plugin = plugin
if (self.extensions and ('CPU' in self.device)):
self.plugin.add_extension(self.extensions, self.device)
try:
self.network = self.plugin.read_network(model=model_structure, weights=model_weights)
except:
raise ValueError('Could not initialise the network. Have you entered the correct model path?')
if (self.device == 'CPU'):
self.check_model()
self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)
self.input_blob = next(iter(self.network.inputs))
self.input_shape = self.network.inputs[self.input_blob].shape
self.output_blob = next(iter(self.network.outputs))
self.output_shape = self.network.outputs[self.output_blob].shape
return |
def predict(self, image):
'\n This method is meant for running predictions on the input image.\n '
if np.all(np.array(image.shape)):
net_input = {self.input_blob: self.preprocess_input(image)}
if self.async_infer:
self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)
if (self.exec_network.requests[0].wait((- 1)) == 0):
outputs = self.infer_request_handle.outputs[self.output_blob]
(eyes_coords, crop_left, crop_right) = self.preprocess_output(outputs, image)
else:
self.infer_request_handle = self.exec_network.infer(inputs=net_input)
outputs = self.infer_request_handle[self.output_blob]
(eyes_coords, crop_left, crop_right) = self.preprocess_output(outputs, image)
else:
eyes_coords = []
crop_left = []
crop_right = []
return (eyes_coords, crop_left, crop_right) | -697,507,850,356,550,300 | This method is meant for running predictions on the input image. | src/facial_landmarks_detection.py | predict | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def predict(self, image):
'\n \n '
if np.all(np.array(image.shape)):
net_input = {self.input_blob: self.preprocess_input(image)}
if self.async_infer:
self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)
if (self.exec_network.requests[0].wait((- 1)) == 0):
outputs = self.infer_request_handle.outputs[self.output_blob]
(eyes_coords, crop_left, crop_right) = self.preprocess_output(outputs, image)
else:
self.infer_request_handle = self.exec_network.infer(inputs=net_input)
outputs = self.infer_request_handle[self.output_blob]
(eyes_coords, crop_left, crop_right) = self.preprocess_output(outputs, image)
else:
eyes_coords = []
crop_left = []
crop_right = []
return (eyes_coords, crop_left, crop_right) |
def check_model(self):
'\n This method checks whether the model (along with the plugin) is supported on the CPU device.\n If anything is missing (such as a CPU extension), let the user know and exit the program.\n '
supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')
unsupported_layers = [l for l in self.network.layers.keys() if (l not in supported_layers)]
if (len(unsupported_layers) != 0):
log.error('Unsupported layers found: {}'.format(unsupported_layers))
if self.extensions:
log.error('The extensions specified do not support some layers. Please specify a new extension.')
else:
log.error('Please try to specify an extension library path by using the --extensions command line argument.')
sys.exit(1)
return | -3,086,004,535,256,337,000 | This method checks whether the model (along with the plugin) is supported on the CPU device.
If anything is missing (such as a CPU extension), let the user know and exit the program. | src/facial_landmarks_detection.py | check_model | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def check_model(self):
'\n This method checks whether the model (along with the plugin) is supported on the CPU device.\n If anything is missing (such as a CPU extension), let the user know and exit the program.\n '
supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')
unsupported_layers = [l for l in self.network.layers.keys() if (l not in supported_layers)]
if (len(unsupported_layers) != 0):
log.error('Unsupported layers found: {}'.format(unsupported_layers))
if self.extensions:
log.error('The extensions specified do not support some layers. Please specify a new extension.')
else:
log.error('Please try to specify an extension library path by using the --extensions command line argument.')
sys.exit(1)
return |
def preprocess_input(self, image):
'\n Method to process inputs before feeding them into the model for inference.\n '
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image | 7,479,949,119,213,668,000 | Method to process inputs before feeding them into the model for inference. | src/facial_landmarks_detection.py | preprocess_input | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def preprocess_input(self, image):
'\n \n '
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image |
def preprocess_output(self, outputs, image):
'\n Method to process outputs before feeding them into the next model for\n inference or for the last step of the app.\n '
w = image.shape[1]
h = image.shape[0]
outputs = outputs[0]
(xl, yl) = (int((outputs[0][0][0] * w)), int((outputs[1][0][0] * h)))
(xr, yr) = (int((outputs[2][0][0] * w)), int((outputs[3][0][0] * h)))
eyes_coords = [xl, yl, xr, yr]
square_size = int((w / 10))
left_eye_box = [(xl - square_size), (yl - square_size), (xl + square_size), (yl + square_size)]
right_eye_box = [(xr - square_size), (yr - square_size), (xr + square_size), (yr + square_size)]
crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]
crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]
return (eyes_coords, crop_left, crop_right) | -7,138,546,993,679,951,000 | Method to process outputs before feeding them into the next model for
inference or for the last step of the app. | src/facial_landmarks_detection.py | preprocess_output | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | python | def preprocess_output(self, outputs, image):
'\n Method to process outputs before feeding them into the next model for\n inference or for the last step of the app.\n '
w = image.shape[1]
h = image.shape[0]
outputs = outputs[0]
(xl, yl) = (int((outputs[0][0][0] * w)), int((outputs[1][0][0] * h)))
(xr, yr) = (int((outputs[2][0][0] * w)), int((outputs[3][0][0] * h)))
eyes_coords = [xl, yl, xr, yr]
square_size = int((w / 10))
left_eye_box = [(xl - square_size), (yl - square_size), (xl + square_size), (yl + square_size)]
right_eye_box = [(xr - square_size), (yr - square_size), (xr + square_size), (yr + square_size)]
crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]
crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]
return (eyes_coords, crop_left, crop_right) |
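An end-to-end sketch for this wrapper class; the class name `FacialLandmarksDetector`, the model path and the image path are assumptions, while `IECore` is the legacy OpenVINO API that `read_network`/`load_network` above belong to:

```python
import cv2
from openvino.inference_engine import IECore

plugin = IECore()
model = FacialLandmarksDetector(   # assumed name of the class shown above
    'models/landmarks-regression-retail-0009', device='CPU', async_infer=False)
model.load_model(plugin)

face = cv2.imread('face.jpg')      # assumed pre-cropped face image
eyes_coords, crop_left, crop_right = model.predict(face)
```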
def start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):
'\n Start a background task that checks connection and reconnects periodically to a peer.\n '
if peer_info_arg.is_valid():
peer_info = peer_info_arg
else:
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for (_, connection) in server.all_connections.items():
if ((connection.get_peer_info() == peer_info) or (connection.get_peer_info() == peer_info_arg)):
peer_retry = False
if peer_retry:
log.info(f'Reconnecting to peer {peer_info}')
try:
(await server.start_client(peer_info, None, auth=auth))
except Exception as e:
log.info(f'Failed to connect to {peer_info} {e}')
(await asyncio.sleep(3))
return asyncio.create_task(connection_check()) | -1,791,811,104,352,758,000 | Start a background task that checks connection and reconnects periodically to a peer. | stor/server/reconnect_task.py | start_reconnect_task | Stor-Network/stor-blockchain | python | def start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):
'\n \n '
if peer_info_arg.is_valid():
peer_info = peer_info_arg
else:
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for (_, connection) in server.all_connections.items():
if ((connection.get_peer_info() == peer_info) or (connection.get_peer_info() == peer_info_arg)):
peer_retry = False
if peer_retry:
log.info(f'Reconnecting to peer {peer_info}')
try:
(await server.start_client(peer_info, None, auth=auth))
except Exception as e:
log.info(f'Failed to connect to {peer_info} {e}')
(await asyncio.sleep(3))
return asyncio.create_task(connection_check()) |
def configure(config={}, nested=False, cache=None):
"Useful for when you need to control Switchboard's setup."
if nested:
config = nested_config(config)
Settings.init(cache=cache, **config)
operator.cache = cache
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
__import__('switchboard.builtins') | -2,602,632,946,951,776,000 | Useful for when you need to control Switchboard's setup. | switchboard/manager.py | configure | juju/switchboard | python | def configure(config={}, nested=False, cache=None):
if nested:
config = nested_config(config)
Settings.init(cache=cache, **config)
operator.cache = cache
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
__import__('switchboard.builtins') |
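A hedged setup sketch; the key names mirror the settings read above (`SWITCHBOARD_DBURL`, `SWITCHBOARD_DBTABLE`), but the exact key spelling accepted by `Settings.init` is an assumption:

```python
# With nested=True the dict is first flattened by nested_config(),
# matching a Paste/PasteDeploy-style configuration namespace.
configure({
    'switchboard.dburl': 'postgresql://localhost/myapp',   # assumed key/URL
    'switchboard.dbtable': 'switches',                     # assumed key
}, nested=True)
```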
def __getitem__(self, key):
'\n Returns a SwitchProxy, rather than a Switch. It allows us to\n easily extend the Switches method and automatically include our\n manager instance.\n '
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key)) | 1,561,878,215,264,609,300 | Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance. | switchboard/manager.py | __getitem__ | juju/switchboard | python | def __getitem__(self, key):
'\n Returns a SwitchProxy, rather than a Switch. It allows us to\n easily extend the Switches method and automatically include our\n manager instance.\n '
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key)) |
def with_result_cache(func):
'\n Decorator specifically for is_active. If self.result_cache is set to a {}\n the is_active results will be cached for each set of params.\n '
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if (dic is not None):
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e:
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if (result is not None):
return result
result = func(self, *args, **kwargs)
if (cache_key is not None):
dic[cache_key] = result
return result
return inner | -5,442,204,369,891,342,000 | Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params. | switchboard/manager.py | with_result_cache | juju/switchboard | python | def with_result_cache(func):
'\n Decorator specifically for is_active. If self.result_cache is set to a {}\n the is_active results will be cached for each set of params.\n '
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if (dic is not None):
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e:
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s', args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if (result is not None):
return result
result = func(self, *args, **kwargs)
if (cache_key is not None):
dic[cache_key] = result
return result
return inner |
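Sketch of enabling the cache for the span of a single request; `operator` is the `SwitchManager` singleton this module exports:

```python
operator.result_cache = {}          # turn per-request memoization on
operator.is_active('my_feature')    # computed and cached
operator.is_active('my_feature')    # served from the cache
operator.result_cache = None        # turn it back off when the request ends
```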
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"\n Returns ``True`` if any of ``instances`` match an active switch.\n Otherwise returns ``False``.\n\n >>> operator.is_active('my_feature', request) #doctest: +SKIP\n "
try:
default = kwargs.pop('default', False)
parts = key.split(':')
if (len(parts) > 1):
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:(- 1)]), *instances, **child_kwargs)
if (result is False):
return result
elif (result is True):
default = result
try:
switch = self[key]
except KeyError:
return default
if (switch.status == GLOBAL):
return True
elif (switch.status == DISABLED):
return False
elif (switch.status == INHERIT):
return default
conditions = switch.value
if (not conditions):
return default
instances = (list(instances) if instances else [])
instances.extend(self.context.values())
return_value = False
for (namespace, condition) in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if (not condition_set):
continue
result = condition_set.has_active_condition(condition, instances)
if (result is False):
return False
elif (result is True):
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
return return_value | 4,146,366,985,063,664,600 | Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP | switchboard/manager.py | is_active | juju/switchboard | python | @with_result_cache
def is_active(self, key, *instances, **kwargs):
"\n Returns ``True`` if any of ``instances`` match an active switch.\n Otherwise returns ``False``.\n\n >>> operator.is_active('my_feature', request) #doctest: +SKIP\n "
try:
default = kwargs.pop('default', False)
parts = key.split(':')
if (len(parts) > 1):
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:(- 1)]), *instances, **child_kwargs)
if (result is False):
return result
elif (result is True):
default = result
try:
switch = self[key]
except KeyError:
return default
if (switch.status == GLOBAL):
return True
elif (switch.status == DISABLED):
return False
elif (switch.status == INHERIT):
return default
conditions = switch.value
if (not conditions):
return default
instances = (list(instances) if instances else [])
instances.extend(self.context.values())
return_value = False
for (namespace, condition) in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if (not condition_set):
continue
result = condition_set.has_active_condition(condition, instances)
if (result is False):
return False
elif (result is True):
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
return return_value |
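The `parent:child` walk above means a hierarchical key consults every ancestor before itself. A hedged call pattern (the switch name, `request` object and handler are made up):

```python
# 'checkout:new_ui' first resolves 'checkout': a DISABLED parent
# short-circuits to False, a GLOBAL parent becomes the child's default.
if operator.is_active('checkout:new_ui', request, default=False):
    render_new_checkout()  # hypothetical view helper
```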
def register(self, condition_set):
'\n Registers a condition set with the manager.\n\n >>> condition_set = MyConditionSet() #doctest: +SKIP\n >>> operator.register(condition_set) #doctest: +SKIP\n '
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set | -8,700,801,431,450,779,000 | Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP | switchboard/manager.py | register | juju/switchboard | python | def register(self, condition_set):
'\n Registers a condition set with the manager.\n\n >>> condition_set = MyConditionSet() #doctest: +SKIP\n >>> operator.register(condition_set) #doctest: +SKIP\n '
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set |
def unregister(self, condition_set):
'\n Unregisters a condition set with the manager.\n\n >>> operator.unregister(condition_set) #doctest: +SKIP\n '
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None) | -3,110,375,665,669,518,000 | Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP | switchboard/manager.py | unregister | juju/switchboard | python | def unregister(self, condition_set):
'\n Unregisters a condition set with the manager.\n\n >>> operator.unregister(condition_set) #doctest: +SKIP\n '
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None) |
def get_condition_set_by_id(self, switch_id):
'\n Given the identifier of a condition set (described in\n ConditionSet.get_id()), returns the registered instance.\n '
return registry[switch_id] | -4,491,633,507,980,078,000 | Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance. | switchboard/manager.py | get_condition_set_by_id | juju/switchboard | python | def get_condition_set_by_id(self, switch_id):
'\n Given the identifier of a condition set (described in\n ConditionSet.get_id()), returns the registered instance.\n '
return registry[switch_id] |
def get_condition_sets(self):
'\n Returns a generator yielding all currently registered\n ConditionSet instances.\n '
return registry.itervalues() | -4,830,379,080,715,123,000 | Returns a generator yielding all currently registered
ConditionSet instances. | switchboard/manager.py | get_condition_sets | juju/switchboard | python | def get_condition_sets(self):
'\n Returns a generator yielding all currently registered\n ConditionSet instances.\n '
return registry.itervalues() |
def get_all_conditions(self):
'\n Returns a generator which yields groups of lists of conditions.\n\n >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP\n >>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP\n '
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=(lambda x: x.get_group_label())):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
(yield (condition_set.get_id(), group, field)) | -4,256,478,779,523,565,000 | Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP | switchboard/manager.py | get_all_conditions | juju/switchboard | python | def get_all_conditions(self):
'\n Returns a generator which yields groups of lists of conditions.\n\n >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP\n >>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP\n '
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=(lambda x: x.get_group_label())):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
(yield (condition_set.get_id(), group, field)) |
def get_output_dir(imdb_name, net_name=None, output_dir='output'):
'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))
if (net_name is not None):
outdir = osp.join(outdir, net_name)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir | -8,190,657,062,051,350,000 | Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None). | model/utils/config.py | get_output_dir | Juggernaut93/SSH-pytorch | python | def get_output_dir(imdb_name, net_name=None, output_dir='output'):
'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))
if (net_name is not None):
outdir = osp.join(outdir, net_name)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir |
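A usage sketch (the imdb and network names are placeholders):

```python
outdir = get_output_dir('wider_train', net_name='ssh_vgg16')
# -> <ROOT_DIR>/output/<EXP_DIR>/wider_train/ssh_vgg16, created on demand
```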
def get_output_tb_dir(imdb, weights_filename):
'Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if (weights_filename is None):
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir | -5,700,492,455,365,735,000 | Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None). | model/utils/config.py | get_output_tb_dir | Juggernaut93/SSH-pytorch | python | def get_output_tb_dir(imdb, weights_filename):
'Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n '
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if (weights_filename is None):
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if (not os.path.exists(outdir)):
os.makedirs(outdir)
return outdir |
def _merge_a_into_b(a, b):
'Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n '
if (type(a) is not edict):
return
for (k, v) in a.items():
if (k not in b):
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if (old_type is not type(v)):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if (type(v) is edict):
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v | -7,092,344,095,667,698,000 | Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a. | model/utils/config.py | _merge_a_into_b | Juggernaut93/SSH-pytorch | python | def _merge_a_into_b(a, b):
'Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n '
if (type(a) is not edict):
return
for (k, v) in a.items():
if (k not in b):
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if (old_type is not type(v)):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if (type(v) is edict):
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v |
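A behavior sketch: keys in `a` clobber matching keys in `b`, unknown keys raise `KeyError`, and a type mismatch raises `ValueError` unless the target is a NumPy array:

```python
from easydict import EasyDict as edict

b = edict({'TRAIN': edict({'SCALES': (600,), 'BATCH_SIZE': 128})})
a = edict({'TRAIN': edict({'BATCH_SIZE': 64})})
_merge_a_into_b(a, b)
print(b.TRAIN.BATCH_SIZE)  # 64 -- SCALES is left untouched
```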
def cfg_from_file(filename):
'Load a config file and merge it into the default options.'
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C) | 3,048,925,329,488,565,000 | Load a config file and merge it into the default options. | model/utils/config.py | cfg_from_file | Juggernaut93/SSH-pytorch | python | def cfg_from_file(filename):
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C) |
def cfg_from_list(cfg_list):
'Set config keys via list (e.g., from command line).'
from ast import literal_eval
assert ((len(cfg_list) % 2) == 0)
for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:(- 1)]:
assert (subkey in d)
d = d[subkey]
subkey = key_list[(- 1)]
assert (subkey in d)
try:
value = literal_eval(v)
except:
value = v
assert (type(value) == type(d[subkey])), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
d[subkey] = value | 1,336,896,622,455,959,300 | Set config keys via list (e.g., from command line). | model/utils/config.py | cfg_from_list | Juggernaut93/SSH-pytorch | python | def cfg_from_list(cfg_list):
from ast import literal_eval
assert ((len(cfg_list) % 2) == 0)
for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:(- 1)]:
assert (subkey in d)
d = d[subkey]
subkey = key_list[(- 1)]
assert (subkey in d)
try:
value = literal_eval(v)
except:
value = v
assert (type(value) == type(d[subkey])), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
d[subkey] = value |
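A usage sketch mirroring a typical command-line `--set` override; the key names are assumptions about what exists in `__C`:

```python
cfg_from_list(['TRAIN.BATCH_SIZE', '64', 'EXP_DIR', 'my_experiment'])
# '64' is literal_eval'd to the int 64; 'my_experiment' stays a string.
# Each value must match the type of the existing default, or the
# assertion fails.
```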
def configure_output_dir(d=None):
'\n Set output directory to d, or to /tmp/somerandomnumber if d is None\n '
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = (d or ('/tmp/experiments/%i' % int(time.time())))
if (not osp.exists(G.output_dir)):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, 'log.txt'), 'w')
atexit.register(G.output_file.close)
print(colorize(('Logging data to %s' % G.output_file.name), 'green', bold=True)) | 4,649,972,137,371,970,000 | Set output directory to d, or to /tmp/somerandomnumber if d is None | ADMCode/snuz/ars/logz.py | configure_output_dir | CoAxLab/AdaptiveDecisionMaking_2018 | python | def configure_output_dir(d=None):
'\n \n '
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = (d or ('/tmp/experiments/%i' % int(time.time())))
if (not osp.exists(G.output_dir)):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, 'log.txt'), 'w')
atexit.register(G.output_file.close)
print(colorize(('Logging data to %s' % G.output_file.name), 'green', bold=True)) |
def log_tabular(key, val):
'\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n '
if G.first_row:
G.log_headers.append(key)
else:
assert (key in G.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
assert (key not in G.log_current_row), ('You already set %s this iteration. Maybe you forgot to call dump_tabular()' % key)
G.log_current_row[key] = val | -4,924,882,616,239,801,000 | Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration | ADMCode/snuz/ars/logz.py | log_tabular | CoAxLab/AdaptiveDecisionMaking_2018 | python | def log_tabular(key, val):
'\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n '
if G.first_row:
G.log_headers.append(key)
else:
assert (key in G.log_headers), ("Trying to introduce a new key %s that you didn't include in the first iteration" % key)
assert (key not in G.log_current_row), ('You already set %s this iteration. Maybe you forgot to call dump_tabular()' % key)
G.log_current_row[key] = val |
def dump_tabular():
'\n Write all of the diagnostics from the current iteration\n '
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15, max(key_lens))
keystr = ('%' + ('%d' % max_key_len))
fmt = (('| ' + keystr) + 's | %15s |')
n_slashes = (22 + max_key_len)
print(('-' * n_slashes))
for key in G.log_headers:
val = G.log_current_row.get(key, '')
if hasattr(val, '__float__'):
valstr = ('%8.3g' % val)
else:
valstr = val
print((fmt % (key, valstr)))
vals.append(val)
print(('-' * n_slashes))
if (G.output_file is not None):
if G.first_row:
G.output_file.write('\t'.join(G.log_headers))
G.output_file.write('\n')
G.output_file.write('\t'.join(map(str, vals)))
G.output_file.write('\n')
G.output_file.flush()
G.log_current_row.clear()
G.first_row = False | 2,233,594,333,627,156,700 | Write all of the diagnostics from the current iteration | ADMCode/snuz/ars/logz.py | dump_tabular | CoAxLab/AdaptiveDecisionMaking_2018 | python | def dump_tabular():
'\n \n '
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15, max(key_lens))
keystr = ('%' + ('%d' % max_key_len))
fmt = (('| ' + keystr) + 's | %15s |')
n_slashes = (22 + max_key_len)
print(('-' * n_slashes))
for key in G.log_headers:
val = G.log_current_row.get(key, '')
if hasattr(val, '__float__'):
valstr = ('%8.3g' % val)
else:
valstr = val
print((fmt % (key, valstr)))
vals.append(val)
print(('-' * n_slashes))
if (G.output_file is not None):
if G.first_row:
G.output_file.write('\t'.join(G.log_headers))
G.output_file.write('\n')
G.output_file.write('\t'.join(map(str, vals)))
G.output_file.write('\n')
G.output_file.flush()
G.log_current_row.clear()
G.first_row = False |
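The three functions above form a fixed-header logging loop: `log_tabular` must see the same keys every iteration, and `dump_tabular` both prints the table and appends a tab-separated row to `log.txt`. A short sketch (the directory is an assumption):

```python
configure_output_dir('/tmp/experiments/ars_demo')
for itr in range(3):
    log_tabular('Iteration', itr)
    log_tabular('AverageReward', 10.0 * itr)
    dump_tabular()   # prints the table and writes one row of log.txt
```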
def old_javascript_array(array):
"in case the browser doesn't support JSON, and to save some separator bytes"
array = list(array)
sepChar = ord(' ')
chars_used = set(''.join(array))
assert (('"' not in chars_used) and ('\\' not in chars_used) and ('<' not in chars_used) and ('&' not in chars_used)), "Can't use special chars (unless you change this code to escape them)"
while True:
if ((chr(sepChar) not in chars_used) and (not (chr(sepChar) in '\\"<&'))):
break
sepChar += 1
assert (sepChar < 127), "can't find a suitable separator char (hard-code the array instead?)"
return (((('"' + chr(sepChar).join(array)) + '".split("') + chr(sepChar)) + '")') | -8,124,362,927,457,646,000 | in case the browser doesn't support JSON, and to save some separator bytes | ohi.py | old_javascript_array | ssb22/indexer | python | def old_javascript_array(array):
array = list(array)
sepChar = ord(' ')
chars_used = set(''.join(array))
assert (('"' not in chars_used) and ('\\' not in chars_used) and ('<' not in chars_used) and ('&' not in chars_used)), "Can't use special chars (unless you change this code to escape them)"
while True:
if ((chr(sepChar) not in chars_used) and (not (chr(sepChar) in '\\"<&'))):
break
sepChar += 1
assert (sepChar < 127), "can't find a suitable separator char (hard-code the array instead?)"
return (((('"' + chr(sepChar).join(array)) + '".split("') + chr(sepChar)) + '")') |
def htmlDoc(start, end, docNo):
'Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call).'
global __lastStartEnd, __lastDoc
if (not ((start, end) == __lastStartEnd)):
__lastStartEnd = (start, end)
__lastDoc = (header + js_hashjump((x for (x, y) in fragments[start:end] if x)))
if start:
assert docNo, 'Document 0 should start at 0'
__lastDoc += ('<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % ((docNo - 1),))
__lastDoc += ''.join(((tag(x) + y) for (x, y) in fragments[start:end]))
if (end < len(fragments)):
__lastDoc += ('<p><a name="_f" href="%d.html#_h">Next page</a></p>' % ((docNo + 1),))
__lastDoc += footer
return linkSub(__lastDoc) | -2,020,903,471,065,346,800 | Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call). | ohi.py | htmlDoc | ssb22/indexer | python | def htmlDoc(start, end, docNo):
global __lastStartEnd, __lastDoc
if (not ((start, end) == __lastStartEnd)):
__lastStartEnd = (start, end)
__lastDoc = (header + js_hashjump((x for (x, y) in fragments[start:end] if x)))
if start:
assert docNo, 'Document 0 should start at 0'
__lastDoc += ('<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % ((docNo - 1),))
__lastDoc += ''.join(((tag(x) + y) for (x, y) in fragments[start:end]))
if (end < len(fragments)):
__lastDoc += ('<p><a name="_f" href="%d.html#_h">Next page</a></p>' % ((docNo + 1),))
__lastDoc += footer
return linkSub(__lastDoc) |
def findEnd(start, docNo):
"Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate."
eTry = (len(fragments) - start)
assert eTry, 'must start before the end'
sLen = len(htmlDoc(start, (start + eTry), docNo))
if (sLen > max_filesize):
eTry = int((eTry / int((sLen / max_filesize))))
while ((eTry > 1) and (len(htmlDoc(start, (start + eTry), docNo)) > max_filesize)):
eTry = int((eTry / 2))
if (eTry < 1):
eTry = 1
while ((eTry < (len(fragments) - start)) and (len(htmlDoc(start, (start + eTry), docNo)) < max_filesize)):
eTry += 1
return (start + max(1, (eTry - 1))) | -8,778,858,671,342,789,000 | Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate. | ohi.py | findEnd | ssb22/indexer | python | def findEnd(start, docNo):
eTry = (len(fragments) - start)
assert eTry, 'must start before the end'
sLen = len(htmlDoc(start, (start + eTry), docNo))
if (sLen > max_filesize):
eTry = int((eTry / int((sLen / max_filesize))))
while ((eTry > 1) and (len(htmlDoc(start, (start + eTry), docNo)) > max_filesize)):
eTry = int((eTry / 2))
if (eTry < 1):
eTry = 1
while ((eTry < (len(fragments) - start)) and (len(htmlDoc(start, (start + eTry), docNo)) < max_filesize)):
eTry += 1
return (start + max(1, (eTry - 1))) |
def __call__(self, text):
'Find shortest prefix of text that differentiates it from previous item (empty string if no difference)'
assert (text >= self.lastText), 'input must have been properly sorted'
i = 0
for (c1, c2) in izip((self.lastText + chr(0)), text):
i += 1
if (not (c1 == c2)):
self.lastText = text
return text[:i]
assert (text == self.lastText), ((repr(text) + '!=') + repr(self.lastText))
return '' | -6,722,151,275,927,327,000 | Find shortest prefix of text that differentiates it from previous item (empty string if no difference) | ohi.py | __call__ | ssb22/indexer | python | def __call__(self, text):
assert (text >= self.lastText), 'input must have been properly sorted'
i = 0
for (c1, c2) in izip((self.lastText + chr(0)), text):
i += 1
if (not (c1 == c2)):
self.lastText = text
return text[:i]
assert (text == self.lastText), ((repr(text) + '!=') + repr(self.lastText))
return '' |
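A self-contained sketch of the enclosing class: only `self.lastText` is used above, so a stub initializer plus the method reproduces the behavior (the class name is an assumption; `izip` becomes `zip` on Python 3):

```python
class Differ:
    def __init__(self):
        self.lastText = ''

    def __call__(self, text):
        assert text >= self.lastText, 'input must have been properly sorted'
        i = 0
        for c1, c2 in zip(self.lastText + chr(0), text):
            i += 1
            if c1 != c2:
                self.lastText = text
                return text[:i]
        assert text == self.lastText
        return ''

d = Differ()
print(d('apple'))   # 'a'     -- differs from '' at the first character
print(d('apply'))   # 'apply' -- shares 'appl' with 'apple'
print(d('banana'))  # 'b'
```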
def ready(self):
'Override this to put in:\n Users system checks\n Users signal registration\n '
pass | 2,586,872,105,737,007,000 | Override this to put in:
Users system checks
Users signal registration | bookstudio/books/apps.py | ready | sudoabhinav/bookstudio | python | def ready(self):
'Override this to put in:\n Users system checks\n Users signal registration\n '
pass |
def __call__(self, num_update):
'Return a new learning rate based on number of updates.\n\n Parameters\n ----------\n num_update: nnvm Symbol\n the number of updates applied to weight.\n '
raise NotImplementedError('__call__ method must be overridden.') | 5,438,065,671,394,147,000 | Return a new learning rate based on number of updates.
Parameters
----------
num_update: nnvm Symbol
the number of updates applied to weight. | nnvm/python/nnvm/compiler/lr_scheduler.py | __call__ | 00liujj/tvm | python | def __call__(self, num_update):
'Return a new learning rate based on number of updates.\n\n Parameters\n ----------\n num_update: nnvm Symbol\n the number of updates applied to weight.\n '
raise NotImplementedError('__call__ method must be overridden.') |
def grep_core(media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str) -> None:
"\n We're using the WEBVTT subtitle format. It's better than srt\n because it doesn't emit line numbers and the time code is in\n (hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)\n "
out_file = os.path.join(TEMP, 'media.vtt')
ffmpeg.run(['-i', media_file, out_file])
count = 0
flags = 0
if args.ignore_case:
flags = re.IGNORECASE
prefix = ''
if add_prefix:
prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
if (args.max_count is None):
args.max_count = float('inf')
timecode = ''
line_number = (- 1)
with open(out_file, 'r') as file:
while True:
line = file.readline()
line_number += 1
if (line_number == 0):
continue
if ((not line) or (count >= args.max_count)):
break
if (line.strip() == ''):
continue
if re.match('\\d*:\\d\\d.\\d*\\s-->\\s\\d*:\\d\\d.\\d*', line):
if args.time:
timecode = (line.split('-->')[0].strip() + ' ')
else:
timecode = (line.strip() + '; ')
continue
line = cleanhtml(line)
match = re.search(args.input[0], line, flags)
line = line.strip()
if match:
count += 1
if (not args.count):
if (args.timecode or args.time):
print(((prefix + timecode) + line))
else:
print((prefix + line))
if args.count:
print((prefix + str(count))) | -825,400,447,666,074,200 | We're using the WEBVTT subtitle format. It's better than srt
because it doesn't emit line numbers and the time code is in
(hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss) | auto_editor/subcommands/grep.py | grep_core | chancat87/auto-editor | python | def grep_core(media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str) -> None:
"\n We're using the WEBVTT subtitle format. It's better than srt\n because it doesn't emit line numbers and the time code is in\n (hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)\n "
out_file = os.path.join(TEMP, 'media.vtt')
ffmpeg.run(['-i', media_file, out_file])
count = 0
flags = 0
if args.ignore_case:
flags = re.IGNORECASE
prefix = ''
if add_prefix:
prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
if (args.max_count is None):
args.max_count = float('inf')
timecode = ''
line_number = (- 1)
with open(out_file, 'r') as file:
while True:
line = file.readline()
line_number += 1
if (line_number == 0):
continue
if ((not line) or (count >= args.max_count)):
break
if (line.strip() == ''):
continue
if re.match('\\d*:\\d\\d.\\d*\\s-->\\s\\d*:\\d\\d.\\d*', line):
if args.time:
timecode = (line.split('-->')[0].strip() + ' ')
else:
timecode = (line.strip() + '; ')
continue
line = cleanhtml(line)
match = re.search(args.input[0], line, flags)
line = line.strip()
if match:
count += 1
if (not args.count):
if (args.timecode or args.time):
print(((prefix + timecode) + line))
else:
print((prefix + line))
if args.count:
print((prefix + str(count))) |
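The cue-timing regex buried in grep_core does the WEBVTT-specific work; it can be exercised on its own, with no auto-editor classes involved:

```python
import re

cue = re.compile(r"\d*:\d\d.\d*\s-->\s\d*:\d\d.\d*")

assert cue.match("00:01.000 --> 00:04.000")  # cue timing line: captured as timecode
assert not cue.match("WEBVTT")                # the WEBVTT header line is not a cue
assert not cue.match("Hello <i>world</i>")    # payload lines reach the search path
```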
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
'\n Args:\n provider_id (str): provider to log in with; an IDP_URL_MAP key.\n fence_idp (str, optional): if provider_id is "fence"\n (multi-tenant Fence setup), fence_idp can be any of the\n providers supported by the other Fence. If not specified,\n will default to NIH login.\n shib_idp (str, optional): if provider_id is "fence" and\n fence_idp is "shibboleth", shib_idp can be any Shibboleth/\n InCommon provider. If not specified, will default to NIH\n login.\n\n Returns:\n str: login URL for this provider, including extra query\n parameters if fence_idp and/or shib_idp are specified.\n '
try:
base_url = config['BASE_URL'].rstrip('/')
login_url = (base_url + '/login/{}'.format(IDP_URL_MAP[provider_id]))
except KeyError as e:
raise InternalError('identity provider misconfigured: {}'.format(str(e)))
params = {}
if fence_idp:
params['idp'] = fence_idp
if shib_idp:
params['shib_idp'] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url | -2,670,125,579,266,269,700 | Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified. | fence/blueprints/login/__init__.py | absolute_login_url | chicagopcdc/fence | python | def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
'\n Args:\n provider_id (str): provider to log in with; an IDP_URL_MAP key.\n fence_idp (str, optional): if provider_id is "fence"\n (multi-tenant Fence setup), fence_idp can be any of the\n providers supported by the other Fence. If not specified,\n will default to NIH login.\n shib_idp (str, optional): if provider_id is "fence" and\n fence_idp is "shibboleth", shib_idp can be any Shibboleth/\n InCommon provider. If not specified, will default to NIH\n login.\n\n Returns:\n str: login URL for this provider, including extra query\n parameters if fence_idp and/or shib_idp are specified.\n '
try:
base_url = config['BASE_URL'].rstrip('/')
login_url = (base_url + '/login/{}'.format(IDP_URL_MAP[provider_id]))
except KeyError as e:
raise InternalError('identity provider misconfigured: {}'.format(str(e)))
params = {}
if fence_idp:
params['idp'] = fence_idp
if shib_idp:
params['shib_idp'] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url |
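Everything here except the query-string assembly is config lookup. The effect of add_params_to_uri on the final URL can be approximated with the standard library; BASE_URL and the idp values below are illustrative:

```python
from urllib.parse import urlencode

def with_params(url, params):
    # Rough stand-in for authlib's add_params_to_uri on a URL that
    # has no query string yet.
    if not params:
        return url
    return url + "?" + urlencode(params)

base = "https://example.org/user/login/fence"
print(with_params(base, {"idp": "shibboleth", "shib_idp": "urn:mace:incommon:uchicago.edu"}))
# -> https://example.org/user/login/fence?idp=shibboleth&shib_idp=urn%3Amace%3Aincommon%3Auchicago.edu
```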
def provider_info(login_details):
'\n Args:\n login_details (dict):\n { name, desc, idp, fence_idp, shib_idps, secondary }\n - "idp": a configured provider.\n Multiple options can be configured with the same idp.\n - if provider_id is "fence", "fence_idp" can be any of the\n providers supported by the other Fence. If not specified, will\n default to NIH login.\n - if provider_id is "fence" and fence_idp is "shibboleth", a\n list of "shib_idps" can be configured for InCommon login. If\n not specified, will default to NIH login.\n - Optional parameters: "desc" (description) and "secondary"\n (boolean - can be used by the frontend to display secondary\n buttons differently).\n\n Returns:\n dict: { name, desc, idp, urls, secondary }\n - urls: list of { name, url } dictionaries\n '
info = {'id': login_details['idp'], 'idp': login_details['idp'], 'name': login_details['name'], 'url': absolute_login_url(login_details['idp']), 'desc': login_details.get('desc', None), 'secondary': login_details.get('secondary', False)}
fence_idp = None
if (login_details['idp'] == 'fence'):
fence_idp = login_details.get('fence_idp')
if (((login_details['idp'] == 'shibboleth') or (fence_idp == 'shibboleth')) and ('shib_idps' in login_details)):
if (not hasattr(flask.current_app, 'all_shib_idps')):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details['shib_idps']
if (requested_shib_idps == '*'):
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next((available_shib_idp for available_shib_idp in flask.current_app.all_shib_idps if (available_shib_idp['idp'] == requested_shib_idp)), None)
if (not shib_idp):
raise InternalError('Requested shib_idp "{}" does not exist'.format(requested_shib_idp))
shib_idps.append(shib_idp)
else:
raise InternalError('fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(requested_shib_idps))
info['urls'] = [{'name': shib_idp['name'], 'url': absolute_login_url(login_details['idp'], fence_idp, shib_idp['idp'])} for shib_idp in shib_idps]
else:
info['urls'] = [{'name': login_details['name'], 'url': absolute_login_url(login_details['idp'], fence_idp)}]
return info | -2,740,890,148,715,159,000 | Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries | fence/blueprints/login/__init__.py | provider_info | chicagopcdc/fence | python | def provider_info(login_details):
'\n Args:\n login_details (dict):\n { name, desc, idp, fence_idp, shib_idps, secondary }\n - "idp": a configured provider.\n Multiple options can be configured with the same idp.\n - if provider_id is "fence", "fence_idp" can be any of the\n providers supported by the other Fence. If not specified, will\n default to NIH login.\n - if provider_id is "fence" and fence_idp is "shibboleth", a\n list of "shib_idps" can be configured for InCommon login. If\n not specified, will default to NIH login.\n - Optional parameters: "desc" (description) and "secondary"\n (boolean - can be used by the frontend to display secondary\n buttons differently).\n\n Returns:\n dict: { name, desc, idp, urls, secondary }\n - urls: list of { name, url } dictionaries\n '
info = {'id': login_details['idp'], 'idp': login_details['idp'], 'name': login_details['name'], 'url': absolute_login_url(login_details['idp']), 'desc': login_details.get('desc', None), 'secondary': login_details.get('secondary', False)}
fence_idp = None
if (login_details['idp'] == 'fence'):
fence_idp = login_details.get('fence_idp')
if (((login_details['idp'] == 'shibboleth') or (fence_idp == 'shibboleth')) and ('shib_idps' in login_details)):
if (not hasattr(flask.current_app, 'all_shib_idps')):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details['shib_idps']
if (requested_shib_idps == '*'):
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next((available_shib_idp for available_shib_idp in flask.current_app.all_shib_idps if (available_shib_idp['idp'] == requested_shib_idp)), None)
if (not shib_idp):
raise InternalError('Requested shib_idp "{}" does not exist'.format(requested_shib_idp))
shib_idps.append(shib_idp)
else:
raise InternalError('fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(requested_shib_idps))
info['urls'] = [{'name': shib_idp['name'], 'url': absolute_login_url(login_details['idp'], fence_idp, shib_idp['idp'])} for shib_idp in shib_idps]
else:
info['urls'] = [{'name': login_details['name'], 'url': absolute_login_url(login_details['idp'], fence_idp)}]
return info |
def make_login_blueprint():
'\n Return:\n flask.Blueprint: the blueprint used for ``/login`` endpoints\n\n Raises:\n ValueError: if app is not amenably configured\n '
blueprint = flask.Blueprint('login', __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route('', methods=['GET'])
def default_login():
'\n The default root login route.\n '
(default_provider_info, all_provider_info) = get_login_providers_info()
return flask.jsonify({'default_provider': default_provider_info, 'providers': all_provider_info})
configured_idps = config['OPENID_CONNECT'].keys()
if ('fence' in configured_idps):
blueprint_api.add_resource(FenceLogin, '/fence', strict_slashes=False)
blueprint_api.add_resource(FenceCallback, '/fence/login', strict_slashes=False)
if ('google' in configured_idps):
blueprint_api.add_resource(GoogleLogin, '/google', strict_slashes=False)
blueprint_api.add_resource(GoogleCallback, '/google/login', strict_slashes=False)
if ('orcid' in configured_idps):
blueprint_api.add_resource(ORCIDLogin, '/orcid', strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, '/orcid/login', strict_slashes=False)
if ('ras' in configured_idps):
blueprint_api.add_resource(RASLogin, '/ras', strict_slashes=False)
blueprint_api.add_resource(RASCallback, '/ras/callback', strict_slashes=False)
if ('synapse' in configured_idps):
blueprint_api.add_resource(SynapseLogin, '/synapse', strict_slashes=False)
blueprint_api.add_resource(SynapseCallback, '/synapse/login', strict_slashes=False)
if ('microsoft' in configured_idps):
blueprint_api.add_resource(MicrosoftLogin, '/microsoft', strict_slashes=False)
blueprint_api.add_resource(MicrosoftCallback, '/microsoft/login', strict_slashes=False)
if ('okta' in configured_idps):
blueprint_api.add_resource(OktaLogin, '/okta', strict_slashes=False)
blueprint_api.add_resource(OktaCallback, '/okta/login', strict_slashes=False)
if ('cognito' in configured_idps):
blueprint_api.add_resource(CognitoLogin, '/cognito', strict_slashes=False)
blueprint_api.add_resource(CognitoCallback, '/cognito/login', strict_slashes=False)
if ('shibboleth' in configured_idps):
blueprint_api.add_resource(ShibbolethLogin, '/shib', strict_slashes=False)
blueprint_api.add_resource(ShibbolethCallback, '/shib/login', strict_slashes=False)
if ('cilogon' in configured_idps):
blueprint_api.add_resource(CilogonLogin, '/cilogon', strict_slashes=False)
blueprint_api.add_resource(CilogonCallback, '/cilogon/login', strict_slashes=False)
return blueprint | 5,273,018,673,974,718,000 | Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured | fence/blueprints/login/__init__.py | make_login_blueprint | chicagopcdc/fence | python | def make_login_blueprint():
'\n Return:\n flask.Blueprint: the blueprint used for ``/login`` endpoints\n\n Raises:\n ValueError: if app is not amenably configured\n '
blueprint = flask.Blueprint('login', __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route('', methods=['GET'])
def default_login():
'\n The default root login route.\n '
(default_provider_info, all_provider_info) = get_login_providers_info()
return flask.jsonify({'default_provider': default_provider_info, 'providers': all_provider_info})
configured_idps = config['OPENID_CONNECT'].keys()
if ('fence' in configured_idps):
blueprint_api.add_resource(FenceLogin, '/fence', strict_slashes=False)
blueprint_api.add_resource(FenceCallback, '/fence/login', strict_slashes=False)
if ('google' in configured_idps):
blueprint_api.add_resource(GoogleLogin, '/google', strict_slashes=False)
blueprint_api.add_resource(GoogleCallback, '/google/login', strict_slashes=False)
if ('orcid' in configured_idps):
blueprint_api.add_resource(ORCIDLogin, '/orcid', strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, '/orcid/login', strict_slashes=False)
if ('ras' in configured_idps):
blueprint_api.add_resource(RASLogin, '/ras', strict_slashes=False)
blueprint_api.add_resource(RASCallback, '/ras/callback', strict_slashes=False)
if ('synapse' in configured_idps):
blueprint_api.add_resource(SynapseLogin, '/synapse', strict_slashes=False)
blueprint_api.add_resource(SynapseCallback, '/synapse/login', strict_slashes=False)
if ('microsoft' in configured_idps):
blueprint_api.add_resource(MicrosoftLogin, '/microsoft', strict_slashes=False)
blueprint_api.add_resource(MicrosoftCallback, '/microsoft/login', strict_slashes=False)
if ('okta' in configured_idps):
blueprint_api.add_resource(OktaLogin, '/okta', strict_slashes=False)
blueprint_api.add_resource(OktaCallback, '/okta/login', strict_slashes=False)
if ('cognito' in configured_idps):
blueprint_api.add_resource(CognitoLogin, '/cognito', strict_slashes=False)
blueprint_api.add_resource(CognitoCallback, '/cognito/login', strict_slashes=False)
if ('shibboleth' in configured_idps):
blueprint_api.add_resource(ShibbolethLogin, '/shib', strict_slashes=False)
blueprint_api.add_resource(ShibbolethCallback, '/shib/login', strict_slashes=False)
if ('cilogon' in configured_idps):
blueprint_api.add_resource(CilogonLogin, '/cilogon', strict_slashes=False)
blueprint_api.add_resource(CilogonCallback, '/cilogon/login', strict_slashes=False)
return blueprint |
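A sketch of how a blueprint factory like this gets wired into an app, using plain Flask idiom rather than fence's actual app setup. Fence's config must already be loaded, since the factory reads config['OPENID_CONNECT'] to decide which routes exist:

```python
import flask

def create_app():
    app = flask.Flask(__name__)
    app.register_blueprint(make_login_blueprint(), url_prefix="/login")
    return app
```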
def get_all_shib_idps():
'\n Get the list of all existing Shibboleth IDPs.\n This function only returns the information we need to generate login URLs.\n\n Returns:\n list: list of {"idp": "", "name": ""} dictionaries\n '
url = config['OPENID_CONNECT'].get('fence', {}).get('shibboleth_discovery_url')
if (not url):
raise InternalError('Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured')
res = requests.get(url)
assert (res.status_code == 200), 'Unable to get list of Shibboleth IDPs from {}'.format(url)
all_shib_idps = []
for shib_idp in res.json():
if ('entityID' not in shib_idp):
logger.warning(f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP.")
continue
idp = shib_idp['entityID']
if (len(shib_idp.get('DisplayNames', [])) > 0):
name = get_shib_idp_en_name(shib_idp['DisplayNames'])
else:
logger.warning(f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name.")
name = idp
all_shib_idps.append({'idp': idp, 'name': name})
return all_shib_idps | -9,166,596,299,316,049,000 | Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries | fence/blueprints/login/__init__.py | get_all_shib_idps | chicagopcdc/fence | python | def get_all_shib_idps():
'\n Get the list of all existing Shibboleth IDPs.\n This function only returns the information we need to generate login URLs.\n\n Returns:\n list: list of {"idp": "", "name": ""} dictionaries\n '
url = config['OPENID_CONNECT'].get('fence', {}).get('shibboleth_discovery_url')
if (not url):
raise InternalError('Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured')
res = requests.get(url)
assert (res.status_code == 200), 'Unable to get list of Shibboleth IDPs from {}'.format(url)
all_shib_idps = []
for shib_idp in res.json():
if ('entityID' not in shib_idp):
logger.warning(f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP.")
continue
idp = shib_idp['entityID']
if (len(shib_idp.get('DisplayNames', [])) > 0):
name = get_shib_idp_en_name(shib_idp['DisplayNames'])
else:
logger.warning(f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name.")
name = idp
all_shib_idps.append({'idp': idp, 'name': name})
return all_shib_idps |
def get_shib_idp_en_name(names):
'\n Returns a name in English for a Shibboleth IDP, or the first available\n name if no English name was provided.\n\n Args:\n names (list): list of {"lang": "", "value": ""} dictionaries\n Example:\n [\n {\n "value": "University of Chicago",\n "lang": "en"\n },\n {\n "value": "Universidad de Chicago",\n "lang": "es"\n }\n ]\n\n Returns:\n str: Display name to use for this Shibboleth IDP\n '
for name in names:
if (name.get('lang') == 'en'):
return name['value']
return names[0]['value'] | 48,967,702,506,096,370 | Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP | fence/blueprints/login/__init__.py | get_shib_idp_en_name | chicagopcdc/fence | python | def get_shib_idp_en_name(names):
'\n Returns a name in English for a Shibboleth IDP, or the first available\n name if no English name was provided.\n\n Args:\n names (list): list of {"lang": "", "value": ""} dictionaries\n Example:\n [\n {\n "value": "University of Chicago",\n "lang": "en"\n },\n {\n "value": "Universidad de Chicago",\n "lang": "es"\n }\n ]\n\n Returns:\n str: Display name to use for this Shibboleth IDP\n '
for name in names:
if (name.get('lang') == 'en'):
return name['value']
return names[0]['value'] |
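The selection rule is easy to exercise directly against the function above:

```python
names = [
    {"value": "Universidad de Chicago", "lang": "es"},
    {"value": "University of Chicago", "lang": "en"},
]
assert get_shib_idp_en_name(names) == "University of Chicago"       # English wins, position ignored
assert get_shib_idp_en_name(names[:1]) == "Universidad de Chicago"  # otherwise the first entry
```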
@blueprint.route('', methods=['GET'])
def default_login():
'\n The default root login route.\n '
(default_provider_info, all_provider_info) = get_login_providers_info()
return flask.jsonify({'default_provider': default_provider_info, 'providers': all_provider_info}) | 3,493,305,278,892,696,000 | The default root login route. | fence/blueprints/login/__init__.py | default_login | chicagopcdc/fence | python | @blueprint.route('', methods=['GET'])
def default_login():
'\n \n '
(default_provider_info, all_provider_info) = get_login_providers_info()
return flask.jsonify({'default_provider': default_provider_info, 'providers': all_provider_info}) |
def _split_path(path):
' Split a path into a list of directory names '
if (path[0] != '/'):
raise Exception('Not absolute path')
result = []
while (path != '/'):
(path, tail) = os.path.split(path)
if tail:
result.append(tail)
return list(reversed(result)) | 984,756,720,676,671,400 | Split a path into a list of directory names | src/xrootd_cache_stats.py | _split_path | ivukotic/xcache | python | def _split_path(path):
' '
if (path[0] != '/'):
raise Exception('Not absolute path')
result = []
while (path != '/'):
(path, tail) = os.path.split(path)
if tail:
result.append(tail)
return list(reversed(result)) |
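Behavior of the helper on a few representative inputs:

```python
assert _split_path("/a/b/c") == ["a", "b", "c"]
assert _split_path("/") == []   # the root splits to nothing
# Non-absolute input raises Exception('Not absolute path').
```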
def _is_prefix(lhs, rhs):
' return True if the first list is a prefix of the second '
rhs = list(rhs)
while rhs:
if (lhs == rhs):
return True
rhs.pop()
return False | 3,350,491,932,785,587,700 | return True if the first list is a prefix of the second | src/xrootd_cache_stats.py | _is_prefix | ivukotic/xcache | python | def _is_prefix(lhs, rhs):
' '
rhs = list(rhs)
while rhs:
if (lhs == rhs):
return True
rhs.pop()
return False |
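The pop-until-equal loop gives ordinary list-prefix semantics, with one quirk worth noting:

```python
assert _is_prefix(["a", "b"], ["a", "b", "c"])  # leading run of rhs
assert not _is_prefix(["b"], ["a", "b", "c"])   # mere membership is not enough
assert not _is_prefix([], ["a"])                # quirk: rhs is popped to [] before [] == [] is checked
```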
def scan_cache_dirs(rootdir):
' Scan the top level directory of the cache.\n Walks the path looking for directories that are not in vo_paths.\n For each of these generate a cache summary\n '
results = {}
try:
root_components = _split_path(rootdir)
for (dirpath, dirnames, filenames) in os.walk(rootdir, topdown=True):
dirpath_components = _split_path(dirpath)[len(root_components):]
for name in list(dirnames):
path_components = (dirpath_components + [name])
for p in [_split_path(p) for p in vo_paths]:
if _is_prefix(path_components, p):
break
else:
vo_name = os.path.join('/', *path_components)
try:
results[vo_name] = scan_vo_dir(os.path.join(dirpath, name))
except (OSError, IOError) as ex:
results[vo_name] = {'scan_vo_dir_error': str(ex)}
dirnames.remove(name)
return results
except (OSError, IOError) as ex:
return {'scan_cache_dirs_error': {'message': str(ex)}} | -5,469,963,317,324,711,000 | Scan the top level directory of the cache.
Walks the path looking for directories that are not in vo_paths.
For each of these generate a cache summary | src/xrootd_cache_stats.py | scan_cache_dirs | ivukotic/xcache | python | def scan_cache_dirs(rootdir):
' Scan the top level directory of the cache.\n Walks the path looking for directories that are not in vo_paths.\n For each of these generate a cache summary\n '
results = {}
try:
root_components = _split_path(rootdir)
for (dirpath, dirnames, filenames) in os.walk(rootdir, topdown=True):
dirpath_components = _split_path(dirpath)[len(root_components):]
for name in list(dirnames):
path_components = (dirpath_components + [name])
for p in [_split_path(p) for p in vo_paths]:
if _is_prefix(path_components, p):
break
else:
vo_name = os.path.join('/', *path_components)
try:
results[vo_name] = scan_vo_dir(os.path.join(dirpath, name))
except (OSError, IOError) as ex:
results[vo_name] = {'scan_vo_dir_error': str(ex)}
dirnames.remove(name)
return results
except (OSError, IOError) as ex:
return {'scan_cache_dirs_error': {'message': str(ex)}} |
def scan_vo_dir(vodir):
' Scan a VO directory (assumed to be the whole directory tree after the top level '
now = time.time()
totalsize = 0
nfiles = 0
naccesses = 0
accesses = collections.defaultdict(int)
most_recent_access = 0
bad_cinfo_files = 0
for (root, dirs, files) in os.walk(vodir):
fnames = set(files)
for (f, cinfo) in ((f, (f + '.cinfo')) for f in fnames if ((f + '.cinfo') in fnames)):
try:
st = os.stat(os.path.join(root, f))
except OSError as ex:
if (ex.errno == errno.ENOENT):
continue
else:
raise
try:
access_info = read_cinfo(os.path.join(root, cinfo), now)
except OSError as ex:
if (ex.errno == errno.ENOENT):
continue
else:
bad_cinfo_files += 1
access_info = {'naccesses': 0, 'last_access': 0, 'by_hour': {}}
except ReadCInfoError as ex:
bad_cinfo_files += 1
access_info = ex.access_info
nfiles += 1
file_size = (st.st_blocks * 512)
totalsize += file_size
naccesses += access_info['naccesses']
most_recent_access = max(most_recent_access, access_info['last_access'])
for h in access_info['by_hour']:
accesses[('naccesses_hr_' + h)] += access_info['by_hour'][h]
accesses[('bytes_hr_' + h)] += access_info['bytes_hr'][h]
result = classad.ClassAd({'used_bytes': totalsize, 'nfiles': nfiles, 'naccesses': naccesses, 'bad_cinfo_files': bad_cinfo_files})
result.update(accesses)
if (most_recent_access > 0):
result['most_recent_access_time'] = most_recent_access
return result | 8,486,517,527,046,751,000 | Scan a VO directory (assumed to be the whole directory tree after the top level | src/xrootd_cache_stats.py | scan_vo_dir | ivukotic/xcache | python | def scan_vo_dir(vodir):
' '
now = time.time()
totalsize = 0
nfiles = 0
naccesses = 0
accesses = collections.defaultdict(int)
most_recent_access = 0
bad_cinfo_files = 0
for (root, dirs, files) in os.walk(vodir):
fnames = set(files)
for (f, cinfo) in ((f, (f + '.cinfo')) for f in fnames if ((f + '.cinfo') in fnames)):
try:
st = os.stat(os.path.join(root, f))
except OSError as ex:
if (ex.errno == errno.ENOENT):
continue
else:
raise
try:
access_info = read_cinfo(os.path.join(root, cinfo), now)
except OSError as ex:
if (ex.errno == errno.ENOENT):
continue
else:
bad_cinfo_files += 1
access_info = {'naccesses': 0, 'last_access': 0, 'by_hour': {}}
except ReadCInfoError as ex:
bad_cinfo_files += 1
access_info = ex.access_info
nfiles += 1
file_size = (st.st_blocks * 512)
totalsize += file_size
naccesses += access_info['naccesses']
most_recent_access = max(most_recent_access, access_info['last_access'])
for h in access_info['by_hour']:
accesses[('naccesses_hr_' + h)] += access_info['by_hour'][h]
accesses[('bytes_hr_' + h)] += access_info['bytes_hr'][h]
result = classad.ClassAd({'used_bytes': totalsize, 'nfiles': nfiles, 'naccesses': naccesses, 'bad_cinfo_files': bad_cinfo_files})
result.update(accesses)
if (most_recent_access > 0):
result['most_recent_access_time'] = most_recent_access
return result |
def read_cinfo(cinfo_file, now):
' Try to extract useful info from the cinfo file '
result = {'naccesses': 0, 'last_access': 0, 'by_hour': {'01': 0, '12': 0, '24': 0}, 'bytes_hr': {'01': 0, '12': 0, '24': 0}}
cf = open(cinfo_file, 'rb')
buf = cf.read(_header_fmt_size)
if (len(buf) < _header_fmt_size):
raise ReadCInfoError(('%s header too short' % cinfo_file), result)
(version, buffer_size, file_size) = struct.unpack(_header_fmt, buf)
if (version != 2):
raise ReadCInfoError(('%s unknown version: %s' % (cinfo_file, version)), result)
buff_synced_len = int(math.ceil(((float(file_size) / buffer_size) / 8)))
if (file_size == 0):
buff_synced_len = 1
cf.read(buff_synced_len)
cf.read((16 + 8))
buf = cf.read(_int_fmt_size)
if (len(buf) < _int_fmt_size):
raise ReadCInfoError(('%s: invalid access field' % cinfo_file), result)
(access_count,) = struct.unpack(_int_fmt, buf)
result['naccesses'] = access_count
if (access_count < 0):
raise ReadCInfoError(('%s: invalid access count: %s' % (cinfo_file, access_count)), result)
elif (access_count == 0):
return result
hr_01 = (now - (60 * 60))
hr_12 = (now - ((12 * 60) * 60))
hr_24 = (now - ((24 * 60) * 60))
try:
for buf in iter((lambda : cf.read(_status_fmt_size)), b''):
(access_time, _, bytes_disk, bytes_ram, _) = struct.unpack(_status_fmt, buf)
result['last_access'] = access_time
intervals = list()
if (access_time >= hr_01):
intervals.append('01')
if (access_time >= hr_12):
intervals.append('12')
if (access_time >= hr_24):
intervals.append('24')
else:
next
for interval in intervals:
result['by_hour'][interval] += 1
result['bytes_hr'][interval] += (bytes_disk + bytes_ram)
except struct.error as ex:
raise ReadCInfoError(('%s unable to decode access time data: %s' % (cinfo_file, str(ex))), result)
return result | -835,176,595,427,975,000 | Try to extract useful info from the cinfo file | src/xrootd_cache_stats.py | read_cinfo | ivukotic/xcache | python | def read_cinfo(cinfo_file, now):
' '
result = {'naccesses': 0, 'last_access': 0, 'by_hour': {'01': 0, '12': 0, '24': 0}, 'bytes_hr': {'01': 0, '12': 0, '24': 0}}
cf = open(cinfo_file, 'rb')
buf = cf.read(_header_fmt_size)
if (len(buf) < _header_fmt_size):
raise ReadCInfoError(('%s header too short' % cinfo_file), result)
(version, buffer_size, file_size) = struct.unpack(_header_fmt, buf)
if (version != 2):
raise ReadCInfoError(('%s unknown version: %s' % (cinfo_file, version)), result)
buff_synced_len = int(math.ceil(((float(file_size) / buffer_size) / 8)))
if (file_size == 0):
buff_synced_len = 1
cf.read(buff_synced_len)
cf.read((16 + 8))
buf = cf.read(_int_fmt_size)
if (len(buf) < _int_fmt_size):
raise ReadCInfoError(('%s: invalid access field' % cinfo_file), result)
(access_count,) = struct.unpack(_int_fmt, buf)
result['naccesses'] = access_count
if (access_count < 0):
raise ReadCInfoError(('%s: invalid access count: %s' % (cinfo_file, access_count)), result)
elif (access_count == 0):
return result
hr_01 = (now - (60 * 60))
hr_12 = (now - ((12 * 60) * 60))
hr_24 = (now - ((24 * 60) * 60))
try:
for buf in iter((lambda : cf.read(_status_fmt_size)), b''):
(access_time, _, bytes_disk, bytes_ram, _) = struct.unpack(_status_fmt, buf)
result['last_access'] = access_time
intervals = list()
if (access_time >= hr_01):
intervals.append('01')
if (access_time >= hr_12):
intervals.append('12')
if (access_time >= hr_24):
intervals.append('24')
else:
next
for interval in intervals:
result['by_hour'][interval] += 1
result['bytes_hr'][interval] += (bytes_disk + bytes_ram)
except struct.error as ex:
raise ReadCInfoError(('%s unable to decode access time data: %s' % (cinfo_file, str(ex))), result)
return result |
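read_cinfo leans on module-level struct formats (_header_fmt, _int_fmt, _status_fmt) defined elsewhere in xrootd_cache_stats.py and not shown in this extract. The unpack calls fix only the field counts, so the type codes below are guesses; they are enough to round-trip one synthetic status record the way the parser consumes it:

```python
import struct

_header_fmt = "=iqq"    # hypothetical: version, buffer_size, file_size
_int_fmt = "=q"         # hypothetical: access_count
_status_fmt = "=qqqqq"  # hypothetical: access_time, ?, bytes_disk, bytes_ram, ?

_status_fmt_size = struct.calcsize(_status_fmt)

buf = struct.pack(_status_fmt, 1_700_000_000, 0, 4096, 512, 0)
access_time, _, bytes_disk, bytes_ram, _ = struct.unpack(_status_fmt, buf)
assert len(buf) == _status_fmt_size and bytes_disk + bytes_ram == 4608
```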
def test_xrootd_server(url):
" Contact the xrootd server to check if it's alive\n "
try:
myclient = XRootD.client.FileSystem(url)
startt = time.time()
(response, _) = myclient.ping(timeout=10)
elapsed = (time.time() - startt)
if response.fatal:
status = 'fatal'
elif response.error:
status = 'error'
elif response.ok:
status = 'ok'
else:
status = 'unknown'
result = {'ping_response_status': status, 'ping_response_code': response.code, 'ping_response_message': response.message, 'ping_elapsed_time': elapsed}
return result
except Exception as ex:
return {'ping_response_status': 'failed', 'ping_response_code': (- 1), 'ping_response_message': str(ex), 'ping_elapsed_time': 0.0} | 3,086,439,373,156,683,000 | Contact the xrootd server to check if it's alive | src/xrootd_cache_stats.py | test_xrootd_server | ivukotic/xcache | python | def test_xrootd_server(url):
" \n "
try:
myclient = XRootD.client.FileSystem(url)
startt = time.time()
(response, _) = myclient.ping(timeout=10)
elapsed = (time.time() - startt)
if response.fatal:
status = 'fatal'
elif response.error:
status = 'error'
elif response.ok:
status = 'ok'
else:
status = 'unknown'
result = {'ping_response_status': status, 'ping_response_code': response.code, 'ping_response_message': response.message, 'ping_elapsed_time': elapsed}
return result
except Exception as ex:
return {'ping_response_status': 'failed', 'ping_response_code': (- 1), 'ping_response_message': str(ex), 'ping_elapsed_time': 0.0} |
def get_cache_info(rootdir, cache_max_fs_fraction):
'Get information about the cache itself'
result = {}
try:
stat = os.statvfs(rootdir)
total_size = int(((stat.f_blocks * stat.f_bsize) * cache_max_fs_fraction))
free_size = int((total_size - ((stat.f_blocks - stat.f_bfree) * stat.f_bsize)))
result['total_cache_bytes'] = total_size
result['free_cache_bytes'] = free_size
result['free_cache_fraction'] = (1 - (float((stat.f_blocks - stat.f_bfree)) / int((stat.f_blocks * cache_max_fs_fraction))))
return result
except (OSError, IOError) as ex:
return {} | -1,475,640,488,449,280,000 | Get information about the cache itself | src/xrootd_cache_stats.py | get_cache_info | ivukotic/xcache | python | def get_cache_info(rootdir, cache_max_fs_fraction):
result = {}
try:
stat = os.statvfs(rootdir)
total_size = int(((stat.f_blocks * stat.f_bsize) * cache_max_fs_fraction))
free_size = int((total_size - ((stat.f_blocks - stat.f_bfree) * stat.f_bsize)))
result['total_cache_bytes'] = total_size
result['free_cache_bytes'] = free_size
result['free_cache_fraction'] = (1 - (float((stat.f_blocks - stat.f_bfree)) / int((stat.f_blocks * cache_max_fs_fraction))))
return result
except (OSError, IOError) as ex:
return {} |
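The fraction arithmetic checks out against synthetic statvfs numbers, with the fields stubbed by hand:

```python
f_blocks, f_bfree, f_bsize = 1_000, 400, 4096   # made-up filesystem stats
cache_max_fs_fraction = 0.8

total = int(f_blocks * f_bsize * cache_max_fs_fraction)   # capped cache size
free = int(total - (f_blocks - f_bfree) * f_bsize)        # capped size minus used bytes
frac = 1 - (f_blocks - f_bfree) / int(f_blocks * cache_max_fs_fraction)
assert free / total == frac == 0.25   # cache is three-quarters full
```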
def collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0):
' Collect stats on the cache server '
start_time = time.time()
parsed_url = urllib.parse.urlparse(url)
try:
if (parsed_url.scheme not in ('root', 'xroot')):
raise Exception(("URL '%s' is not an xrootd url" % url))
hostname = parsed_url.netloc
except AttributeError:
if (parsed_url[0] not in ('root', 'xroot')):
raise Exception(("URL '%s' is not an xrootd url" % url))
hostname = parsed_url[2][2:]
result = {'MyType': 'Machine', 'Name': ('xrootd@%s' % hostname), 'stats_time': int(start_time)}
result.update(test_xrootd_server(url))
result.update(get_cache_info(rootdir, cache_max_fs_fraction))
stats_per_vo = scan_cache_dirs(rootdir)
totals = dict()
most_recent_access = 0
result['VO'] = {}
for (vo, vostats) in stats_per_vo.items():
for (k, v) in vostats.items():
if (k == 'most_recent_access_time'):
most_recent_access = max(most_recent_access, v)
else:
try:
totals[k] += v
except KeyError:
totals[k] = v
result['VO'][vo] = vostats
result['used_cache_bytes'] = totals.pop('used_bytes', 0)
for (k, v) in totals.items():
result[('total_' + k)] = v
if (most_recent_access > 0):
result['most_recent_access_time'] = most_recent_access
result['time_to_collect_stats'] = (time.time() - start_time)
return classad.ClassAd(result) | -6,529,351,487,603,837,000 | Collect stats on the cache server | src/xrootd_cache_stats.py | collect_cache_stats | ivukotic/xcache | python | def collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0):
' '
start_time = time.time()
parsed_url = urllib.parse.urlparse(url)
try:
if (parsed_url.scheme not in ('root', 'xroot')):
raise Exception(("URL '%s' is not an xrootd url" % url))
hostname = parsed_url.netloc
except AttributeError:
if (parsed_url[0] not in ('root', 'xroot')):
raise Exception(("URL '%s' is not an xrootd url" % url))
hostname = parsed_url[2][2:]
result = {'MyType': 'Machine', 'Name': ('xrootd@%s' % hostname), 'stats_time': int(start_time)}
result.update(test_xrootd_server(url))
result.update(get_cache_info(rootdir, cache_max_fs_fraction))
stats_per_vo = scan_cache_dirs(rootdir)
totals = dict()
most_recent_access = 0
result['VO'] = {}
for (vo, vostats) in stats_per_vo.items():
for (k, v) in vostats.items():
if (k == 'most_recent_access_time'):
most_recent_access = max(most_recent_access, v)
else:
try:
totals[k] += v
except KeyError:
totals[k] = v
result['VO'][vo] = vostats
result['used_cache_bytes'] = totals.pop('used_bytes', 0)
for (k, v) in totals.items():
result[('total_' + k)] = v
if (most_recent_access > 0):
result['most_recent_access_time'] = most_recent_access
result['time_to_collect_stats'] = (time.time() - start_time)
return classad.ClassAd(result) |
def __init__(__self__, *, resource_group_name: pulumi.Input[str], address_space: Optional[pulumi.Input['AddressSpaceArgs']]=None, bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']]=None, device_properties: Optional[pulumi.Input['DevicePropertiesArgs']]=None, id: Optional[pulumi.Input[str]]=None, ip_address: Optional[pulumi.Input[str]]=None, is_security_site: Optional[pulumi.Input[bool]]=None, location: Optional[pulumi.Input[str]]=None, site_key: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_wan: Optional[pulumi.Input['SubResourceArgs']]=None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]=None, vpn_site_name: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a VpnSite resource.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.\n :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (address_space is not None):
pulumi.set(__self__, 'address_space', address_space)
if (bgp_properties is not None):
pulumi.set(__self__, 'bgp_properties', bgp_properties)
if (device_properties is not None):
pulumi.set(__self__, 'device_properties', device_properties)
if (id is not None):
pulumi.set(__self__, 'id', id)
if (ip_address is not None):
pulumi.set(__self__, 'ip_address', ip_address)
if (is_security_site is not None):
pulumi.set(__self__, 'is_security_site', is_security_site)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (site_key is not None):
pulumi.set(__self__, 'site_key', site_key)
if (tags is not None):
pulumi.set(__self__, 'tags', tags)
if (virtual_wan is not None):
pulumi.set(__self__, 'virtual_wan', virtual_wan)
if (vpn_site_links is not None):
pulumi.set(__self__, 'vpn_site_links', vpn_site_links)
if (vpn_site_name is not None):
pulumi.set(__self__, 'vpn_site_name', vpn_site_name) | 6,120,896,590,403,586,000 | The set of arguments for constructing a VpnSite resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
:param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | __init__ | sebtelko/pulumi-azure-native | python | def __init__(__self__, *, resource_group_name: pulumi.Input[str], address_space: Optional[pulumi.Input['AddressSpaceArgs']]=None, bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']]=None, device_properties: Optional[pulumi.Input['DevicePropertiesArgs']]=None, id: Optional[pulumi.Input[str]]=None, ip_address: Optional[pulumi.Input[str]]=None, is_security_site: Optional[pulumi.Input[bool]]=None, location: Optional[pulumi.Input[str]]=None, site_key: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_wan: Optional[pulumi.Input['SubResourceArgs']]=None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]=None, vpn_site_name: Optional[pulumi.Input[str]]=None):
"\n The set of arguments for constructing a VpnSite resource.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.\n :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (address_space is not None):
pulumi.set(__self__, 'address_space', address_space)
if (bgp_properties is not None):
pulumi.set(__self__, 'bgp_properties', bgp_properties)
if (device_properties is not None):
pulumi.set(__self__, 'device_properties', device_properties)
if (id is not None):
pulumi.set(__self__, 'id', id)
if (ip_address is not None):
pulumi.set(__self__, 'ip_address', ip_address)
if (is_security_site is not None):
pulumi.set(__self__, 'is_security_site', is_security_site)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (site_key is not None):
pulumi.set(__self__, 'site_key', site_key)
if (tags is not None):
pulumi.set(__self__, 'tags', tags)
if (virtual_wan is not None):
pulumi.set(__self__, 'virtual_wan', virtual_wan)
if (vpn_site_links is not None):
pulumi.set(__self__, 'vpn_site_links', vpn_site_links)
if (vpn_site_name is not None):
pulumi.set(__self__, 'vpn_site_name', vpn_site_name) |
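A minimal use of these args through the VpnSite resource class defined below. Resource names, region, and CIDR are invented, and the import assumes the versioned module re-exports its input types, as the pulumi-azure-native Python SDK normally does:

```python
import pulumi
from pulumi_azure_native.network.v20200301 import VpnSite, AddressSpaceArgs

site = VpnSite(
    "example-vpn-site",
    resource_group_name="example-rg",   # the only required argument
    location="westus2",
    ip_address="203.0.113.10",
    address_space=AddressSpaceArgs(address_prefixes=["10.0.0.0/24"]),
)
pulumi.export("site_name", site.name)
```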
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n The resource group name of the VpnSite.\n '
return pulumi.get(self, 'resource_group_name') | 8,317,305,569,743,092,000 | The resource group name of the VpnSite. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | resource_group_name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'resource_group_name') |
@property
@pulumi.getter(name='addressSpace')
def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
'\n The AddressSpace that contains an array of IP address ranges.\n '
return pulumi.get(self, 'address_space') | 2,101,335,417,336,896,000 | The AddressSpace that contains an array of IP address ranges. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | address_space | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='addressSpace')
def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
'\n \n '
return pulumi.get(self, 'address_space') |
@property
@pulumi.getter(name='bgpProperties')
def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
'\n The set of bgp properties.\n '
return pulumi.get(self, 'bgp_properties') | -3,046,225,807,278,165,500 | The set of bgp properties. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | bgp_properties | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='bgpProperties')
def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
'\n \n '
return pulumi.get(self, 'bgp_properties') |
@property
@pulumi.getter(name='deviceProperties')
def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
'\n The device properties.\n '
return pulumi.get(self, 'device_properties') | 7,089,106,565,800,478,000 | The device properties. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | device_properties | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='deviceProperties')
def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
'\n \n '
return pulumi.get(self, 'device_properties') |
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
'\n Resource ID.\n '
return pulumi.get(self, 'id') | 4,003,078,074,025,280,500 | Resource ID. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | id | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'id') |
@property
@pulumi.getter(name='ipAddress')
def ip_address(self) -> Optional[pulumi.Input[str]]:
'\n The ip-address for the vpn-site.\n '
return pulumi.get(self, 'ip_address') | 3,874,535,099,923,703,300 | The ip-address for the vpn-site. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | ip_address | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='ipAddress')
def ip_address(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'ip_address') |
@property
@pulumi.getter(name='isSecuritySite')
def is_security_site(self) -> Optional[pulumi.Input[bool]]:
'\n IsSecuritySite flag.\n '
return pulumi.get(self, 'is_security_site') | -4,221,913,758,271,601,700 | IsSecuritySite flag. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | is_security_site | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='isSecuritySite')
def is_security_site(self) -> Optional[pulumi.Input[bool]]:
'\n \n '
return pulumi.get(self, 'is_security_site') |
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n Resource location.\n '
return pulumi.get(self, 'location') | 5,685,883,695,381,965,000 | Resource location. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | location | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter(name='siteKey')
def site_key(self) -> Optional[pulumi.Input[str]]:
'\n The key for vpn-site that can be used for connections.\n '
return pulumi.get(self, 'site_key') | -973,204,070,441,350,500 | The key for vpn-site that can be used for connections. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | site_key | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='siteKey')
def site_key(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'site_key') |
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | -2,047,115,851,061,118,500 | Resource tags. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | tags | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter(name='virtualWan')
def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
'\n The VirtualWAN to which the vpnSite belongs.\n '
return pulumi.get(self, 'virtual_wan') | 3,641,573,313,436,819,000 | The VirtualWAN to which the vpnSite belongs. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | virtual_wan | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualWan')
def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
'\n \n '
return pulumi.get(self, 'virtual_wan') |
@property
@pulumi.getter(name='vpnSiteLinks')
def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
'\n List of all vpn site links.\n '
return pulumi.get(self, 'vpn_site_links') | -1,688,591,939,661,649,700 | List of all vpn site links. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | vpn_site_links | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='vpnSiteLinks')
def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
'\n \n '
return pulumi.get(self, 'vpn_site_links') |
@property
@pulumi.getter(name='vpnSiteName')
def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
'\n The name of the VpnSite being created or updated.\n '
return pulumi.get(self, 'vpn_site_name') | 7,713,478,107,883,586,000 | The name of the VpnSite being created or updated. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | vpn_site_name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='vpnSiteName')
def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'vpn_site_name') |
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]]=None, bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]]=None, device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]]=None, id: Optional[pulumi.Input[str]]=None, ip_address: Optional[pulumi.Input[str]]=None, is_security_site: Optional[pulumi.Input[bool]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, site_key: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]]=None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]]=None, vpn_site_name: Optional[pulumi.Input[str]]=None, __props__=None):
"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.\n :param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n "
... | 4,679,802,617,474,998,000 | VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | __init__ | sebtelko/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]]=None, bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]]=None, device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]]=None, id: Optional[pulumi.Input[str]]=None, ip_address: Optional[pulumi.Input[str]]=None, is_security_site: Optional[pulumi.Input[bool]]=None, location: Optional[pulumi.Input[str]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, site_key: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]]=None, vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]]=None, vpn_site_name: Optional[pulumi.Input[str]]=None, __props__=None):
"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.\n :param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.\n :param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.\n :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.\n :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.\n :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.\n :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.\n "
... |
@overload
def __init__(__self__, resource_name: str, args: VpnSiteArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param VpnSiteArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... | -3,237,702,773,393,285,000 | VpnSite Resource.
:param str resource_name: The name of the resource.
:param VpnSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | __init__ | sebtelko/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, args: VpnSiteArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n VpnSite Resource.\n\n :param str resource_name: The name of the resource.\n :param VpnSiteArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... |
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'VpnSite':
"\n Get an existing VpnSite resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__['address_space'] = None
__props__.__dict__['bgp_properties'] = None
__props__.__dict__['device_properties'] = None
__props__.__dict__['etag'] = None
__props__.__dict__['ip_address'] = None
__props__.__dict__['is_security_site'] = None
__props__.__dict__['location'] = None
__props__.__dict__['name'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['site_key'] = None
__props__.__dict__['tags'] = None
__props__.__dict__['type'] = None
__props__.__dict__['virtual_wan'] = None
__props__.__dict__['vpn_site_links'] = None
return VpnSite(resource_name, opts=opts, __props__=__props__) | -4,261,798,479,824,081,000 | Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | get | sebtelko/pulumi-azure-native | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'VpnSite':
"\n Get an existing VpnSite resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__['address_space'] = None
__props__.__dict__['bgp_properties'] = None
__props__.__dict__['device_properties'] = None
__props__.__dict__['etag'] = None
__props__.__dict__['ip_address'] = None
__props__.__dict__['is_security_site'] = None
__props__.__dict__['location'] = None
__props__.__dict__['name'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['site_key'] = None
__props__.__dict__['tags'] = None
__props__.__dict__['type'] = None
__props__.__dict__['virtual_wan'] = None
__props__.__dict__['vpn_site_links'] = None
return VpnSite(resource_name, opts=opts, __props__=__props__) |
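Since get only needs a logical name plus the provider ID, adopting an existing site's state is a short call. A sketch, where the subscription GUID and resource path are invented placeholders:

```python
import pulumi
from pulumi_azure_native.network.v20200301 import VpnSite

# A made-up ARM resource ID; a real deployment supplies its own.
existing = VpnSite.get(
    "imported-site",
    id="/subscriptions/00000000-0000-0000-0000-000000000000"
       "/resourceGroups/example-rg/providers/Microsoft.Network/vpnSites/example-vpn-site",
)

pulumi.export("imported_location", existing.location)
```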
@property
@pulumi.getter(name='addressSpace')
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
'\n The AddressSpace that contains an array of IP address ranges.\n '
return pulumi.get(self, 'address_space') | 6,035,939,328,442,930,000 | The AddressSpace that contains an array of IP address ranges. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | address_space | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='addressSpace')
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
'\n \n '
return pulumi.get(self, 'address_space') |
@property
@pulumi.getter(name='bgpProperties')
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
'\n The set of bgp properties.\n '
return pulumi.get(self, 'bgp_properties') | -5,544,391,000,063,190,000 | The set of bgp properties. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | bgp_properties | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='bgpProperties')
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
'\n \n '
return pulumi.get(self, 'bgp_properties') |
@property
@pulumi.getter(name='deviceProperties')
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
'\n The device properties.\n '
return pulumi.get(self, 'device_properties') | -7,396,698,740,448,333,000 | The device properties. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | device_properties | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='deviceProperties')
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
'\n \n '
return pulumi.get(self, 'device_properties') |
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n A unique read-only string that changes whenever the resource is updated.\n '
return pulumi.get(self, 'etag') | 5,960,741,373,667,297,000 | A unique read-only string that changes whenever the resource is updated. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | etag | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'etag') |
@property
@pulumi.getter(name='ipAddress')
def ip_address(self) -> pulumi.Output[Optional[str]]:
'\n The ip-address for the vpn-site.\n '
return pulumi.get(self, 'ip_address') | -3,467,924,975,983,858,000 | The ip-address for the vpn-site. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | ip_address | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='ipAddress')
def ip_address(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'ip_address') |
@property
@pulumi.getter(name='isSecuritySite')
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
'\n IsSecuritySite flag.\n '
return pulumi.get(self, 'is_security_site') | -849,941,201,518,532,000 | IsSecuritySite flag. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | is_security_site | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='isSecuritySite')
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
'\n \n '
return pulumi.get(self, 'is_security_site') |
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
'\n Resource location.\n '
return pulumi.get(self, 'location') | -605,776,475,662,102,400 | Resource location. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | location | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n Resource name.\n '
return pulumi.get(self, 'name') | 4,695,236,134,441,039,000 | Resource name. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | name | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n The provisioning state of the VPN site resource.\n '
return pulumi.get(self, 'provisioning_state') | -854,177,058,422,730,600 | The provisioning state of the VPN site resource. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | provisioning_state | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@property
@pulumi.getter(name='siteKey')
def site_key(self) -> pulumi.Output[Optional[str]]:
'\n The key for vpn-site that can be used for connections.\n '
return pulumi.get(self, 'site_key') | 7,705,280,787,700,509,000 | The key for vpn-site that can be used for connections. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | site_key | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='siteKey')
def site_key(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'site_key') |
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | -2,929,197,049,816,896,000 | Resource tags. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | tags | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n Resource type.\n '
return pulumi.get(self, 'type') | 2,132,950,812,122,862,800 | Resource type. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | type | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type') |
@property
@pulumi.getter(name='virtualWan')
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
'\n The VirtualWAN to which the vpnSite belongs.\n '
return pulumi.get(self, 'virtual_wan') | 5,332,521,942,094,007,000 | The VirtualWAN to which the vpnSite belongs. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | virtual_wan | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualWan')
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
'\n \n '
return pulumi.get(self, 'virtual_wan') |
@property
@pulumi.getter(name='vpnSiteLinks')
def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
'\n List of all vpn site links.\n '
return pulumi.get(self, 'vpn_site_links') | -5,086,771,345,091,703,000 | List of all vpn site links. | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | vpn_site_links | sebtelko/pulumi-azure-native | python | @property
@pulumi.getter(name='vpnSiteLinks')
def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
'\n \n '
return pulumi.get(self, 'vpn_site_links') |
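All of the getters above return pulumi.Output values that resolve asynchronously, so derived values go through .apply rather than direct reads. A short sketch, reusing the hypothetical site resource from the earlier example:

```python
import pulumi

# vpn_site_links is Output[Optional[Sequence[...]]], so guard against None.
link_count = site.vpn_site_links.apply(lambda links: len(links) if links else 0)
pulumi.export("link_count", link_count)
```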
@click.group('r2dt')
def cli():
'\n A group of commands for parsing data from secondary structures into an\n importable format.\n '
pass | 5,622,575,217,022,508,000 | A group of commands for parsing data from secondary structures into an
importable format. | rnacentral_pipeline/cli/r2dt.py | cli | RNAcentral/rnacentral-import-pipeline | python | @click.group('r2dt')
def cli():
'\n A group of commands for parsing data from secondary structures into an\n importable format.\n '
pass |
@cli.command('process-svgs')
@click.option('--allow-missing', is_flag=True, default=False)
@click.argument('model_info', type=click.File('r'))
@click.argument('directory', type=click.Path())
@click.argument('output', type=click.File('w'))
def process_svgs(model_info, directory, output, allow_missing=False):
'\n Process all SVG secondary structures in the given directory and produce a\n single data file that can be imported into the database.\n '
r2dt.write(model_info, directory, output, allow_missing=allow_missing) | 3,233,411,465,854,024 | Process all SVG secondary structures in the given directory and produce a
single data file that can be imported into the database. | rnacentral_pipeline/cli/r2dt.py | process_svgs | RNAcentral/rnacentral-import-pipeline | python | @cli.command('process-svgs')
@click.option('--allow-missing', is_flag=True, default=False)
@click.argument('model_info', type=click.File('r'))
@click.argument('directory', type=click.Path())
@click.argument('output', type=click.File('w'))
def process_svgs(model_info, directory, output, allow_missing=False):
'\n Process all SVG secondary structures in the given directory and produce a\n single data file that can be imported into the database.\n '
r2dt.write(model_info, directory, output, allow_missing=allow_missing) |
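Because these are click commands under the r2dt group, they can be driven from Python with click's test runner. A hedged sketch: the module path comes from this file, but model_info.tsv, the svgs directory, and the output filename are assumptions:

```python
from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

runner = CliRunner()
# --allow-missing tolerates structures that never produced an SVG.
result = runner.invoke(
    cli, ["process-svgs", "--allow-missing", "model_info.tsv", "svgs", "structures.csv"]
)
assert result.exit_code == 0, result.output
```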
@cli.group('should-show')
def should_show():
'\n Some commands relating to building a model for should show as well as\n running it.\n ' | -8,246,602,746,235,892,000 | Some commands relating to building a model for should show as well as
running it. | rnacentral_pipeline/cli/r2dt.py | should_show | RNAcentral/rnacentral-import-pipeline | python | @cli.group('should-show')
def should_show():
'\n Some commands relating to building a model for should show as well as\n running it.\n ' |
@should_show.command('convert-sheet')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def convert_sheet(filename, output):
"\n This command is to convert a downloaded google sheet csv into a csv that can\n be used for training data. Often we will build a spreadsheet of example URS\n and then use that to build a training set. It is nice since you can embedd\n an SVG in google sheets so it is fast for us to compare several of them.\n\n In order to move that back into the training data you can download that\n sheet as a CSV and then run this command on it to build the CSV that is used\n in training. It requires there be a 'urs' and 'Labeled Should show' column\n to build the CSV. The values in labeled should show must be true/false\n (ignoring case).\n "
r2dt.write_converted_sheet(filename, output) | -4,815,811,023,913,836,000 | This command is to convert a downloaded google sheet csv into a csv that can
be used for training data. Often we will build a spreadsheet of example URS
and then use that to build a training set. It is nice since you can embed
an SVG in google sheets so it is fast for us to compare several of them.
In order to move that back into the training data you can download that
sheet as a CSV and then run this command on it to build the CSV that is used
in training. It requires there be a 'urs' and 'Labeled Should show' column
to build the CSV. The values in labeled should show must be true/false
(ignoring case). | rnacentral_pipeline/cli/r2dt.py | convert_sheet | RNAcentral/rnacentral-import-pipeline | python | @should_show.command('convert-sheet')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def convert_sheet(filename, output):
"\n This command is to convert a downloaded google sheet csv into a csv that can\n be used for training data. Often we will build a spreadsheet of example URS\n and then use that to build a training set. It is nice since you can embedd\n an SVG in google sheets so it is fast for us to compare several of them.\n\n In order to move that back into the training data you can download that\n sheet as a CSV and then run this command on it to build the CSV that is used\n in training. It requires there be a 'urs' and 'Labeled Should show' column\n to build the CSV. The values in labeled should show must be true/false\n (ignoring case).\n "
r2dt.write_converted_sheet(filename, output) |
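A sketch of the round trip the docstring describes; sheet.csv stands in for a downloaded Google Sheets export and must carry the 'urs' and 'Labeled Should show' columns:

```python
from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

runner = CliRunner()
# sheet.csv: the downloaded sheet; training.csv: the resulting training file.
result = runner.invoke(cli, ["should-show", "convert-sheet", "sheet.csv", "training.csv"])
assert result.exit_code == 0, result.output
```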
@should_show.command('fetch-data')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def fetch_training_data(filename, output, db_url=None):
'\n This builds a CSV file of training data to use for the model building. I\n keep it separate so I can build a training csv and play with it interactively\n before committing the final model building logic to the pipeline.\n '
r2dt.write_training_data(filename, db_url, output) | 7,687,694,629,916,893,000 | This builds a CSV file of training data to use for the model building. I
keep it separate so I can build a training csv and play with it interactively
before committing the final model building logic to the pipeline. | rnacentral_pipeline/cli/r2dt.py | fetch_training_data | RNAcentral/rnacentral-import-pipeline | python | @should_show.command('fetch-data')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def fetch_training_data(filename, output, db_url=None):
'\n This builds a CSV file of training data to use for the model building. I\n keep it separate so I can build a training csv and play with it interactively\n before committing the final model building logic to the pipeline.\n '
r2dt.write_training_data(filename, db_url, output) |
@should_show.command('inspect-data')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def fetch_inspect_data(filename, output, db_url=None):
'\n This is the command to use when trying to fetch more examples to add to the\n training set. This will fetch some information that is useful for a person\n to evaluate a diagram and decide if it should be true/false in the training\n set.\n '
r2dt.write_training_data(filename, db_url, output) | -1,115,318,413,282,255,600 | This is the command to use when trying to fetch more examples to add to the
training set. This will fetch some information that is useful for a person
to evaluate a diagram and decide if it should be true/false in the training
set. | rnacentral_pipeline/cli/r2dt.py | fetch_inspect_data | RNAcentral/rnacentral-import-pipeline | python | @should_show.command('inspect-data')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def fetch_inspect_data(filename, output, db_url=None):
'\n This is the command to use when trying to fetch more examples to add to the\n training set. This will fetch some information that is useful for a person\n to evaluate a diagram and decide if it should be true/false in the training\n set.\n '
r2dt.write_training_data(filename, db_url, output) |
@should_show.command('build-model')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('training-info', type=click.File('r'))
@click.argument('model', type=click.Path())
def build_model(training_info, model, db_url=None):
'\n This builds a model given the training information. The training\n information should be a csv file of:\n URS,flag\n The flag must be 1 or 0 to indicate if the URS should be shown or not. This\n will fetch the data like the fetch-data command but will then build a model\n and write it out to the output file directly.\n '
r2dt.build_model(training_info, db_url, Path(model)) | -7,088,327,318,684,605,000 | This builds a model given the training information. The training
information should be a csv file of:
URS,flag
The flag must be 1 or 0 to indicate if the URS should be shown or not. This
will fetch the data like the fetch-data command but will then build a model
and write it out to the output file directly. | rnacentral_pipeline/cli/r2dt.py | build_model | RNAcentral/rnacentral-import-pipeline | python | @should_show.command('build-model')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('training-info', type=click.File('r'))
@click.argument('model', type=click.Path())
def build_model(training_info, model, db_url=None):
'\n This builds a model given the training information. The training\n information should be a csv file of:\n URS,flag\n The flag must be 1 or 0 to indicate if the URS should be shown or not. This\n will fetch the data like the fetch-data command but will then build a model\n and write it out to the output file directly.\n '
r2dt.build_model(training_info, db_url, Path(model)) |
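The training file is just an accession and a 0/1 flag per line. A sketch under the assumption that PGDATABASE points at a reachable database; the accessions and filenames are invented:

```python
from pathlib import Path

from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

# Toy URS,flag training rows; 1 = should show, 0 = should not.
Path("training.csv").write_text("URS0000000001,1\nURS0000000002,0\n")

runner = CliRunner()
# Fetches features per URS from the database, then writes the model file.
result = runner.invoke(cli, ["should-show", "build-model", "training.csv", "model.bin"])
assert result.exit_code == 0, result.output
```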
@should_show.command('compute')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('model', type=click.Path())
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def write_should_show(model, filename, output, db_url=None):
'\n This computes the should show values using the given model and a\n file listing urs ids to use. The data needed for each URS will be fetched\n from the database. This is meant to operate on large batches, like\n relabeling the entire database.\n '
r2dt.write_should_show(model, filename, db_url, output) | -1,106,030,058,217,618,800 | This computes the should show values using the given model and a
file listing urs ids to use. The data needed for each URS will be fetched
from the database. This is meant to operate on large batches, like
relabeling the entire database. | rnacentral_pipeline/cli/r2dt.py | write_should_show | RNAcentral/rnacentral-import-pipeline | python | @should_show.command('compute')
@click.option('--db-url', envvar='PGDATABASE')
@click.argument('model', type=click.Path())
@click.argument('filename', type=click.File('r'))
@click.argument('output', type=click.File('w'))
def write_should_show(model, filename, output, db_url=None):
'\n This computes the should show values using the given model and a\n file listing urs ids to use. The data needed for each URS will be fetched\n from the database. This is meant to operate on large batches, like\n relabeling the entire database.\n '
r2dt.write_should_show(model, filename, db_url, output) |
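With a model built, relabeling runs as a batch over a plain list of URS ids. A sketch; the one-id-per-line format of urs-ids.txt is an assumption, as are the filenames:

```python
from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

runner = CliRunner()
# model.bin comes from build-model; urs-ids.txt lists the ids to relabel.
result = runner.invoke(
    cli, ["should-show", "compute", "model.bin", "urs-ids.txt", "should_show.csv"]
)
assert result.exit_code == 0, result.output
```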
@cli.group('model-info')
def model_info():
'\n Commands for parsing and generating data files we can import into the\n database as model info files.\n '
pass | 6,172,029,846,261,659,000 | Commands for parsing and generating data files we can import into the
database as model info files. | rnacentral_pipeline/cli/r2dt.py | model_info | RNAcentral/rnacentral-import-pipeline | python | @cli.group('model-info')
def model_info():
'\n Commands for parsing and generating data files we can import into the\n database as model info files.\n '
pass |
@model_info.command('crw')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def crw_model_info(filename, output):
'\n Parse the CRW metadata file and produce a model info file we can import.\n '
r2dt.write_crw(filename, output) | 2,648,798,942,224,611,300 | Parse the CRW metadata file and produce a model info file we can import. | rnacentral_pipeline/cli/r2dt.py | crw_model_info | RNAcentral/rnacentral-import-pipeline | python | @model_info.command('crw')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def crw_model_info(filename, output):
'\n \n '
r2dt.write_crw(filename, output) |
@model_info.command('ribovision')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def ribovision_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce a model info file we can import into our database.\n '
r2dt.write_ribovision(filename, output) | 3,917,267,558,135,540,000 | Parse the metadata.tsv file from R2DT for Ribovision models to
produce a model info file we can import into our database. | rnacentral_pipeline/cli/r2dt.py | ribovision_model_info | RNAcentral/rnacentral-import-pipeline | python | @model_info.command('ribovision')
@click.argument('filename', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def ribovision_model_info(filename, output):
'\n Parse the metadata.tsv file from R2DT for Ribovision models to\n produce something we can put in our database.\n '
r2dt.write_ribovision(filename, output) |
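Both model-info commands default their output argument to '-', which click maps to stdout. A sketch, with metadata.tsv standing in for the R2DT metadata file:

```python
from click.testing import CliRunner

from rnacentral_pipeline.cli.r2dt import cli

runner = CliRunner()
# Omitting the output argument sends the importable data to stdout ('-').
result = runner.invoke(cli, ["model-info", "ribovision", "metadata.tsv"])
print(result.output)
```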