body_hash (stringlengths 64–64) | body (stringlengths 23–109k) | docstring (stringlengths 1–57k) | path (stringlengths 4–198) | name (stringlengths 1–115) | repository_name (stringlengths 7–111) | repository_stars (float64 0–191k) | lang (stringclasses 1) | body_without_docstring (stringlengths 14–108k) | unified (stringlengths 45–133k)
---|---|---|---|---|---|---|---|---|---|
ee69d829669ccff93925b02449a790ee38e103410c7a68925c7787004c39aaeb | def Start(self):
'Schedules the ReadBuffer client action.'
pathspec = rdf_paths.PathSpec(path='\\\\.\\PhysicalDrive0\\', pathtype=rdf_paths.PathSpec.PathType.OS, path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
self.state.bytes_downloaded = 0
self.state.buffers = []
buffer_size = constants.CLIENT_MAX_BUFFER_SIZE
buffers_we_need = (self.args.length // buffer_size)
if (self.args.length % buffer_size):
buffers_we_need += 1
bytes_we_need = self.args.length
for i in range(buffers_we_need):
request = rdf_client.BufferReference(pathspec=pathspec, offset=(i * buffer_size), length=min(bytes_we_need, buffer_size))
self.CallClient(server_stubs.ReadBuffer, request, next_state=compatibility.GetName(self.StoreMBR))
bytes_we_need -= buffer_size | Schedules the ReadBuffer client action. | grr/server/grr_response_server/flows/general/transfer.py | Start | max-vogler/grr | 4,238 | python | def Start(self):
pathspec = rdf_paths.PathSpec(path='\\\\.\\PhysicalDrive0\\', pathtype=rdf_paths.PathSpec.PathType.OS, path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
self.state.bytes_downloaded = 0
self.state.buffers = []
buffer_size = constants.CLIENT_MAX_BUFFER_SIZE
buffers_we_need = (self.args.length // buffer_size)
if (self.args.length % buffer_size):
buffers_we_need += 1
bytes_we_need = self.args.length
for i in range(buffers_we_need):
request = rdf_client.BufferReference(pathspec=pathspec, offset=(i * buffer_size), length=min(bytes_we_need, buffer_size))
self.CallClient(server_stubs.ReadBuffer, request, next_state=compatibility.GetName(self.StoreMBR))
bytes_we_need -= buffer_size | def Start(self):
pathspec = rdf_paths.PathSpec(path='\\\\.\\PhysicalDrive0\\', pathtype=rdf_paths.PathSpec.PathType.OS, path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
self.state.bytes_downloaded = 0
self.state.buffers = []
buffer_size = constants.CLIENT_MAX_BUFFER_SIZE
buffers_we_need = (self.args.length // buffer_size)
if (self.args.length % buffer_size):
buffers_we_need += 1
bytes_we_need = self.args.length
for i in range(buffers_we_need):
request = rdf_client.BufferReference(pathspec=pathspec, offset=(i * buffer_size), length=min(bytes_we_need, buffer_size))
self.CallClient(server_stubs.ReadBuffer, request, next_state=compatibility.GetName(self.StoreMBR))
bytes_we_need -= buffer_size<|docstring|>Schedules the ReadBuffer client action.<|endoftext|> |
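The `Start` flow above splits one logical disk read into fixed-size `ReadBuffer` requests via ceiling division. A minimal standalone sketch of that scheduling arithmetic (`plan_reads` is a hypothetical helper, not part of GRR):

```python
def plan_reads(total_length, buffer_size):
    """Yield (offset, length) pairs covering total_length bytes."""
    n_buffers = total_length // buffer_size
    if total_length % buffer_size:  # partial final buffer
        n_buffers += 1
    remaining = total_length
    for i in range(n_buffers):
        yield (i * buffer_size, min(remaining, buffer_size))
        remaining -= buffer_size

# A 10-byte read with 4-byte buffers takes three requests:
assert list(plan_reads(10, 4)) == [(0, 4), (4, 4), (8, 2)]
```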
430d0eb21d1cfb9bc004beddf5707c4388304c2e73807d8f024676c393ceb161 | def StoreMBR(self, responses):
'This method stores the MBR.'
if (not responses.success):
msg = ('Could not retrieve MBR: %s' % responses.status)
self.Log(msg)
raise flow_base.FlowError(msg)
response = responses.First()
self.state.buffers.append(response.data)
self.state.bytes_downloaded += len(response.data)
if (self.state.bytes_downloaded >= self.args.length):
mbr_data = b''.join(self.state.buffers)
self.state.buffers = None
self.Log(('Successfully collected the MBR (%d bytes).' % len(mbr_data)))
self.SendReply(rdfvalue.RDFBytes(mbr_data)) | This method stores the MBR. | grr/server/grr_response_server/flows/general/transfer.py | StoreMBR | max-vogler/grr | 4,238 | python | def StoreMBR(self, responses):
if (not responses.success):
msg = ('Could not retrieve MBR: %s' % responses.status)
self.Log(msg)
raise flow_base.FlowError(msg)
response = responses.First()
self.state.buffers.append(response.data)
self.state.bytes_downloaded += len(response.data)
if (self.state.bytes_downloaded >= self.args.length):
mbr_data = b''.join(self.state.buffers)
self.state.buffers = None
self.Log(('Successfully collected the MBR (%d bytes).' % len(mbr_data)))
self.SendReply(rdfvalue.RDFBytes(mbr_data)) | def StoreMBR(self, responses):
if (not responses.success):
msg = ('Could not retrieve MBR: %s' % responses.status)
self.Log(msg)
raise flow_base.FlowError(msg)
response = responses.First()
self.state.buffers.append(response.data)
self.state.bytes_downloaded += len(response.data)
if (self.state.bytes_downloaded >= self.args.length):
mbr_data = b''.join(self.state.buffers)
self.state.buffers = None
self.Log(('Successfully collected the MBR (%d bytes).' % len(mbr_data)))
self.SendReply(rdfvalue.RDFBytes(mbr_data))<|docstring|>This method stores the MBR.<|endoftext|> |
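`StoreMBR` above accumulates response chunks and joins them once the downloaded byte count reaches the requested length. The same accumulate-then-join pattern in isolation (`ChunkCollector` is a hypothetical stand-in for the flow state, not GRR code):

```python
class ChunkCollector:
    """Collect byte chunks; return the joined blob once complete."""

    def __init__(self, expected_length):
        self.expected_length = expected_length
        self.bytes_downloaded = 0
        self.buffers = []

    def store(self, data):
        self.buffers.append(data)
        self.bytes_downloaded += len(data)
        if self.bytes_downloaded >= self.expected_length:
            blob = b''.join(self.buffers)
            self.buffers = None  # drop per-chunk state, as the flow does
            return blob
        return None

c = ChunkCollector(6)
assert c.store(b'abc') is None       # not complete yet
assert c.store(b'def') == b'abcdef'  # complete: chunks joined in order
```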
bdcc6200e9962974b868188ffc246592208d84223f591159670d4fae11301baa | def Start(self):
'This issues the sendfile request.'
self.CallClient(server_stubs.SendFile, self.args, next_state=compatibility.GetName(self.Done)) | This issues the sendfile request. | grr/server/grr_response_server/flows/general/transfer.py | Start | max-vogler/grr | 4,238 | python | def Start(self):
self.CallClient(server_stubs.SendFile, self.args, next_state=compatibility.GetName(self.Done)) | def Start(self):
self.CallClient(server_stubs.SendFile, self.args, next_state=compatibility.GetName(self.Done))<|docstring|>This issues the sendfile request.<|endoftext|> |
980b0e854eae7a0d352e4d9cfc7b409763d35e84ddd14b82bdad639910858d40 | def test_number_separator_pass():
'Test passes for number_separator.'
input_text = 'hello2 people'
assert (number_separator(input_text) == 'hello 2 people') | Test passes for number_separator. | tests/test_number_separator.py | test_number_separator_pass | rezashabrang/cleaner-utils | 4 | python | def test_number_separator_pass():
input_text = 'hello2 people'
assert (number_separator(input_text) == 'hello 2 people') | def test_number_separator_pass():
input_text = 'hello2 people'
assert (number_separator(input_text) == 'hello 2 people')<|docstring|>Test passes for number_separator.<|endoftext|> |
99e164248a5398b487fd2d82eaf81634c9f27298db727009c39f310c96751e0d | def catchInterrupt(func):
"decorator : when interupt occurs the display is lost if you don't catch it\n TODO * <view>.stop_data_fetch() # stop\n\n ."
def catch_interrupt(*args, **kwargs):
try:
func(*args, **kwargs)
except KeyboardInterrupt:
pass
return catch_interrupt | decorator : when an interrupt occurs the display is lost if you don't catch it
TODO * <view>.stop_data_fetch() # stop
. | scripts/streams_aid.py | catchInterrupt | IBMStreams/sample.wikirecent | 1 | python | def catchInterrupt(func):
"decorator : when interupt occurs the display is lost if you don't catch it\n TODO * <view>.stop_data_fetch() # stop\n\n ."
def catch_interrupt(*args, **kwargs):
try:
func(*args, **kwargs)
except KeyboardInterrupt:
pass
return catch_interrupt | def catchInterrupt(func):
"decorator : when interupt occurs the display is lost if you don't catch it\n TODO * <view>.stop_data_fetch() # stop\n\n ."
def catch_interrupt(*args, **kwargs):
try:
func(*args, **kwargs)
except KeyboardInterrupt:
pass
return catch_interrupt<|docstring|>decorator : when an interrupt occurs the display is lost if you don't catch it
TODO * <view>.stop_data_fetch() # stop
.<|endoftext|> |
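An illustrative use of the `catchInterrupt` decorator above: wrapping a blocking display loop so that Ctrl-C ends it quietly instead of dumping a `KeyboardInterrupt` traceback (the `watch` function is hypothetical):

```python
import time

@catchInterrupt
def watch(view, period=2):
    while True:      # blocks until the user interrupts
        print(view)  # stand-in for rendering live view data
        time.sleep(period)

watch('events')  # press Ctrl-C to return normally, with no traceback
```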
fc817ada6e6bcfcc4b1f5f116733170931804b590b8941c92b9022cf7c5ecd1c | def display_view_stop(eventView, period=2):
'Wrapper for streamsx.rest_primitives.View.display() that adds a stop button. '
button = widgets.Button(description='Stop Updating')
display(button)
eventView.display(period=period)
def on_button_clicked(b):
eventView.stop_data_fetch()
b.description = 'Stopped'
button.on_click(on_button_clicked) | Wrapper for streamsx.rest_primitives.View.display() that adds a stop button. | scripts/streams_aid.py | display_view_stop | IBMStreams/sample.wikirecent | 1 | python | def display_view_stop(eventView, period=2):
' '
button = widgets.Button(description='Stop Updating')
display(button)
eventView.display(period=period)
def on_button_clicked(b):
eventView.stop_data_fetch()
b.description = 'Stopped'
button.on_click(on_button_clicked) | def display_view_stop(eventView, period=2):
' '
button = widgets.Button(description='Stop Updating')
display(button)
eventView.display(period=period)
def on_button_clicked(b):
eventView.stop_data_fetch()
b.description = 'Stopped'
button.on_click(on_button_clicked)<|docstring|>Wrapper for streamsx.rest_primitives.View.display() that adds a stop button.<|endoftext|> |
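`display_view_stop` above relies on the standard ipywidgets callback wiring. A sketch of that widget pattern in isolation, minus the Streams-specific `stop_data_fetch` call (assumes a Jupyter environment with ipywidgets installed):

```python
import ipywidgets as widgets
from IPython.display import display

button = widgets.Button(description='Stop Updating')

def on_button_clicked(b):
    # the real handler also calls eventView.stop_data_fetch()
    b.description = 'Stopped'

button.on_click(on_button_clicked)  # register the click handler
display(button)                     # render the button in the notebook
```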
eb9fb382c7582e1c9579b11d4c3178ff360830796af590ca28857853df7a70f0 | def view_events(views):
'\n Build interface to display a list of views and\n display view when selected from list.\n\n '
view_names = [view.name for view in views]
nameView = dict(zip(view_names, views))
select = widgets.RadioButtons(options=view_names, value=None, description='Select view to display', disabled=False)
def on_change(b):
if (b['name'] == 'label'):
clear_output(wait=True)
[view.stop_data_fetch() for view in views]
display(select)
display_view_stop(nameView[b['new']], period=2)
select.observe(on_change)
display(select) | Build interface to display a list of views and
display view when selected from list. | scripts/streams_aid.py | view_events | IBMStreams/sample.wikirecent | 1 | python | def view_events(views):
'\n Build interface to display a list of views and\n display view when selected from list.\n\n '
view_names = [view.name for view in views]
nameView = dict(zip(view_names, views))
select = widgets.RadioButtons(options=view_names, value=None, description='Select view to display', disabled=False)
def on_change(b):
if (b['name'] == 'label'):
clear_output(wait=True)
[view.stop_data_fetch() for view in views]
display(select)
display_view_stop(nameView[b['new']], period=2)
select.observe(on_change)
display(select) | def view_events(views):
'\n Build interface to display a list of views and\n display view when selected from list.\n\n '
view_names = [view.name for view in views]
nameView = dict(zip(view_names, views))
select = widgets.RadioButtons(options=view_names, value=None, description='Select view to display', disabled=False)
def on_change(b):
if (b['name'] == 'label'):
clear_output(wait=True)
[view.stop_data_fetch() for view in views]
display(select)
display_view_stop(nameView[b['new']], period=2)
select.observe(on_change)
display(select)<|docstring|>Build interface to display a list of views and
display view when selected from list.<|endoftext|> |
d6e275ecf0726e2bad97cc0f9a8e299315c7d4f19240380ed9fec689c18cfd58 | def find_job(instance, job_name=None):
'locate job within instance'
for job in instance.get_jobs():
if (job.applicationName.split('::')[(- 1)] == job_name):
return job
else:
return None | locate job within instance | scripts/streams_aid.py | find_job | IBMStreams/sample.wikirecent | 1 | python | def find_job(instance, job_name=None):
for job in instance.get_jobs():
if (job.applicationName.split('::')[(- 1)] == job_name):
return job
else:
return None | def find_job(instance, job_name=None):
for job in instance.get_jobs():
if (job.applicationName.split('::')[(- 1)] == job_name):
return job
else:
return None<|docstring|>locate job within instance<|endoftext|> |
df7f202dd733ee65beb7d18c006a8fc4efd6b60f48dc50b2de1e81879d663a7c | def display_views(instance, job_name):
'Locate/promote and display all views of a job'
job = find_job(instance, job_name=job_name)
if (job is None):
print('Failed to locate job')
else:
views = job.get_views()
view_events(views) | Locate/promote and display all views of a job | scripts/streams_aid.py | display_views | IBMStreams/sample.wikirecent | 1 | python | def display_views(instance, job_name):
job = find_job(instance, job_name=job_name)
if (job is None):
print('Failed to locate job')
else:
views = job.get_views()
view_events(views) | def display_views(instance, job_name):
job = find_job(instance, job_name=job_name)
if (job is None):
print('Failed to locate job')
else:
views = job.get_views()
view_events(views)<|docstring|>Locate/promote and display all views of a job<|endoftext|> |
35762aef971c1c4faba84256aa1adf42c5866af66a3ef16e79f2cbb43de3228a | def list_jobs(_instance=None, cancel=False):
'\n Interactive selection of jobs to cancel.\n\n Prompts with a SelectMultiple widget; if there are no jobs, you are presented with a blank list.\n\n '
active_jobs = {'{}:{}'.format(job.name, job.health): job for job in _instance.get_jobs()}
selectMultiple_jobs = widgets.SelectMultiple(options=active_jobs.keys(), value=[], rows=len(active_jobs), description=('Cancel jobs(s)' if cancel else 'Active job(s):'), layout=Layout(width='60%'))
cancel_jobs = widgets.ToggleButton(value=False, description='Cancel', disabled=False, button_style='warning', tooltip='Delete selected jobs', icon='stop')
def on_value_change(change):
for job in selectMultiple_jobs.value:
print('canceling job:', job, active_jobs[job].cancel())
cancel_jobs.disabled = True
selectMultiple_jobs.disabled = True
cancel_jobs.observe(on_value_change, names='value')
if cancel:
return HBox([selectMultiple_jobs, cancel_jobs])
else:
return HBox([selectMultiple_jobs]) | Interactive selection of jobs to cancel.
Prompts with a SelectMultiple widget; if there are no jobs, you are presented with a blank list. | scripts/streams_aid.py | list_jobs | IBMStreams/sample.wikirecent | 1 | python | def list_jobs(_instance=None, cancel=False):
'\n Interactive selection of jobs to cancel.\n\n Prompts with a SelectMultiple widget; if there are no jobs, you are presented with a blank list.\n\n '
active_jobs = {'{}:{}'.format(job.name, job.health): job for job in _instance.get_jobs()}
selectMultiple_jobs = widgets.SelectMultiple(options=active_jobs.keys(), value=[], rows=len(active_jobs), description=('Cancel jobs(s)' if cancel else 'Active job(s):'), layout=Layout(width='60%'))
cancel_jobs = widgets.ToggleButton(value=False, description='Cancel', disabled=False, button_style='warning', tooltip='Delete selected jobs', icon='stop')
def on_value_change(change):
for job in selectMultiple_jobs.value:
print('canceling job:', job, active_jobs[job].cancel())
cancel_jobs.disabled = True
selectMultiple_jobs.disabled = True
cancel_jobs.observe(on_value_change, names='value')
if cancel:
return HBox([selectMultiple_jobs, cancel_jobs])
else:
return HBox([selectMultiple_jobs]) | def list_jobs(_instance=None, cancel=False):
'\n Interactive selection of jobs to cancel.\n\n Prompts with a SelectMultiple widget; if there are no jobs, you are presented with a blank list.\n\n '
active_jobs = {'{}:{}'.format(job.name, job.health): job for job in _instance.get_jobs()}
selectMultiple_jobs = widgets.SelectMultiple(options=active_jobs.keys(), value=[], rows=len(active_jobs), description=('Cancel jobs(s)' if cancel else 'Active job(s):'), layout=Layout(width='60%'))
cancel_jobs = widgets.ToggleButton(value=False, description='Cancel', disabled=False, button_style='warning', tooltip='Delete selected jobs', icon='stop')
def on_value_change(change):
for job in selectMultiple_jobs.value:
print('canceling job:', job, active_jobs[job].cancel())
cancel_jobs.disabled = True
selectMultiple_jobs.disabled = True
cancel_jobs.observe(on_value_change, names='value')
if cancel:
return HBox([selectMultiple_jobs, cancel_jobs])
else:
return HBox([selectMultiple_jobs])<|docstring|>Interactive selection of jobs to cancel.
Prompts with a SelectMultiple widget; if there are no jobs, you are presented with a blank list.<|endoftext|> |
b94294c351847f4178e73aa72dd1c079cb6294a3882d853a2cfa9054be2525e0 | def submitToStreams(topology, streams_cfg, streams_instance):
'Cancel if same name job is active and submit.\n Args:\n topology: streams application topology\n streams_cfg : connection information - from get_instance()\n streams_instance : from get_instance()\n Notes:\n Create local copy of the streams config so this can be thread-safe\n '
local_streams_cfg = dict(streams_cfg)
for job in streams_instance.get_jobs():
if (job.name == topology.name):
print('Cancelling old job:', job.name)
job.cancel()
job_config = context.JobConfig(job_name=topology.name, tracing='debug')
job_config.add(local_streams_cfg)
print('Building and submitting new job:', topology.name)
submission_result = context.submit('DISTRIBUTED', topology, local_streams_cfg)
return submission_result | Cancel if same name job is active and submit.
Args:
topology: streams application topology
streams_cfg : connection information - from get_instance()
streams_instance : from get_instance()
Notes:
Create local copy of the streams config so this can be thread-safe | scripts/streams_aid.py | submitToStreams | IBMStreams/sample.wikirecent | 1 | python | def submitToStreams(topology, streams_cfg, streams_instance):
'Cancel if same name job is active and submit.\n Args:\n topology: streams application topology\n streams_cfg : connection information - from get_instance()\n streams_instance : from get_instance()\n Notes:\n Create local copy of the streams config so this can be thread-safe\n '
local_streams_cfg = dict(streams_cfg)
for job in streams_instance.get_jobs():
if (job.name == topology.name):
print('Cancelling old job:', job.name)
job.cancel()
job_config = context.JobConfig(job_name=topology.name, tracing='debug')
job_config.add(local_streams_cfg)
print('Building and submitting new job:', topology.name)
submission_result = context.submit('DISTRIBUTED', topology, local_streams_cfg)
return submission_result | def submitToStreams(topology, streams_cfg, streams_instance):
'Cancel if same name job is active and submit.\n Args:\n topology: streams application topology\n streams_cfg : connection information - from get_instance()\n streams_instance : from get_instance()\n Notes:\n Create local copy of the streams config so this can be thread-safe\n '
local_streams_cfg = dict(streams_cfg)
for job in streams_instance.get_jobs():
if (job.name == topology.name):
print('Cancelling old job:', job.name)
job.cancel()
job_config = context.JobConfig(job_name=topology.name, tracing='debug')
job_config.add(local_streams_cfg)
print('Building and submitting new job:', topology.name)
submission_result = context.submit('DISTRIBUTED', topology, local_streams_cfg)
return submission_result<|docstring|>Cancel if same name job is active and submit.
Args:
topology: streams application topology
streams_cfg : connection information - from get_instance()
streams_instance : from get_instance()
Notes:
Create local copy of the streams config so this can be thread-safe<|endoftext|> |
0f4dc4524a2817fa86893bbe09ab6735dba5450d53c9fa910ac7c765c64f55a1 | def cloudSubmit(instance, streams_cloud_service, topology, credential):
'submit cloud streams instance\n Args:\n instance : when submitting from CP4D or Cloud\n streams_cloud_service : when submitting to cloud, the name of the service; credential.py must contain the appropriate mapping\n topology : topology to submit\n credential : streams instance cloud credential\n\n '
for job in instance.get_jobs():
if (job.name.find(topology.name) > 0):
print('Cancelling old job:', job.name)
job.cancel()
cloud = {context.ConfigParams.VCAP_SERVICES: credential.vcap_conf, context.ConfigParams.SERVICE_NAME: streams_cloud_service, context.ContextTypes.STREAMING_ANALYTICS_SERVICE: 'STREAMING_ANALYTIC', context.ConfigParams.FORCE_REMOTE_BUILD: True}
submission_result = context.submit('STREAMING_ANALYTICS_SERVICE', topology, config=cloud)
if submission_result.job:
report = 'JobId:{} Name:{} '.format(submission_result['id'], submission_result['name'])
else:
report = 'Something did not work:{}'.format(submission_result) | submit cloud streams instance
Args:
instance : when submitting from CP4D or Cloud
streams_cloud_service : when submitting to cloud, the name of the service; credential.py must contain the appropriate mapping
topology : topology to submit
credential : streams instance cloud credential | scripts/streams_aid.py | cloudSubmit | IBMStreams/sample.wikirecent | 1 | python | def cloudSubmit(instance, streams_cloud_service, topology, credential):
'submit cloud streams instance\n Args:\n instance : when submitting from CP4D or Cloud\n streams_cloud_service : when submitting to cloud, the name of the service; credential.py must contain the appropriate mapping\n topology : topology to submit\n credential : streams instance cloud credential\n\n '
for job in instance.get_jobs():
if (job.name.find(topology.name) > 0):
print('Cancelling old job:', job.name)
job.cancel()
cloud = {context.ConfigParams.VCAP_SERVICES: credential.vcap_conf, context.ConfigParams.SERVICE_NAME: streams_cloud_service, context.ContextTypes.STREAMING_ANALYTICS_SERVICE: 'STREAMING_ANALYTIC', context.ConfigParams.FORCE_REMOTE_BUILD: True}
submission_result = context.submit('STREAMING_ANALYTICS_SERVICE', topology, config=cloud)
if submission_result.job:
report = 'JobId:{} Name:{} '.format(submission_result['id'], submission_result['name'])
else:
report = 'Something did not work:{}'.format(submission_result)
'submit cloud streams instance\n Args:\n instance : when submitting from CP4D or Cloud\n streams_cloud_service : when submitting to cloud, the name of the service; credential.py must contain the appropriate mapping\n topology : topology to submit\n credential : streams instance cloud credential\n\n '
for job in instance.get_jobs():
if (job.name.find(topology.name) > 0):
print('Cancelling old job:', job.name)
job.cancel()
cloud = {context.ConfigParams.VCAP_SERVICES: credential.vcap_conf, context.ConfigParams.SERVICE_NAME: streams_cloud_service, context.ContextTypes.STREAMING_ANALYTICS_SERVICE: 'STREAMING_ANALYTIC', context.ConfigParams.FORCE_REMOTE_BUILD: True}
submission_result = context.submit('STREAMING_ANALYTICS_SERVICE', topology, config=cloud)
if submission_result.job:
report = 'JobId:{} Name:{} '.format(submission_result['id'], submission_result['name'])
else:
report = 'Something did not work:{}'.format(submission_result)<|docstring|>submit cloud streams instance
Args:
instance : when submitting from CP4D or Cloud
streams_cloud_service : when submitting to cloud, the name of the service; credential.py must contain the appropriate mapping
topology : topology to submit
credential : streams instance cloud credential<|endoftext|> |
e1e38d4339297055803c6c1a3b76626346eda3726d3a66990db7590183faae6a | def get_instance(service_name='Steaming3Turbine'):
"Setup to access your Streams instance.\n\n ..note::The notebook is work within Cloud and ICP4D.\n Refer to the 'Setup' cells above.\n Returns:\n instance : Access to Streams instance, used for submitting and rendering views.\n "
try:
from icpd_core import icpd_util
import urllib3
global cfg
cfg[context.ConfigParams.SSL_VERIFY] = False
instance = rest.Instance.of_service(cfg)
print('Within ICP4D')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
cfg = None
print('Outside ICP4D')
import credential
sc = rest.StreamingAnalyticsConnection(service_name=service_name, vcap_services=credential.vcap_conf)
instance = sc.get_instances()[0]
return (instance, cfg) | Setup to access your Streams instance.
.. note:: The notebook works within Cloud and ICP4D.
Refer to the 'Setup' cells above.
Returns:
instance : Access to Streams instance, used for submitting and rendering views. | scripts/streams_aid.py | get_instance | IBMStreams/sample.wikirecent | 1 | python | def get_instance(service_name='Steaming3Turbine'):
"Setup to access your Streams instance.\n\n ..note::The notebook is work within Cloud and ICP4D.\n Refer to the 'Setup' cells above.\n Returns:\n instance : Access to Streams instance, used for submitting and rendering views.\n "
try:
from icpd_core import icpd_util
import urllib3
global cfg
cfg[context.ConfigParams.SSL_VERIFY] = False
instance = rest.Instance.of_service(cfg)
print('Within ICP4D')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
cfg = None
print('Outside ICP4D')
import credential
sc = rest.StreamingAnalyticsConnection(service_name=service_name, vcap_services=credential.vcap_conf)
instance = sc.get_instances()[0]
return (instance, cfg) | def get_instance(service_name='Steaming3Turbine'):
"Setup to access your Streams instance.\n\n ..note::The notebook is work within Cloud and ICP4D.\n Refer to the 'Setup' cells above.\n Returns:\n instance : Access to Streams instance, used for submitting and rendering views.\n "
try:
from icpd_core import icpd_util
import urllib3
global cfg
cfg[context.ConfigParams.SSL_VERIFY] = False
instance = rest.Instance.of_service(cfg)
print('Within ICP4D')
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
cfg = None
print('Outside ICP4D')
import credential
sc = rest.StreamingAnalyticsConnection(service_name=service_name, vcap_services=credential.vcap_conf)
instance = sc.get_instances()[0]
return (instance, cfg)<|docstring|>Setup to access your Streams instance.
.. note:: The notebook works within Cloud and ICP4D.
Refer to the 'Setup' cells above.
Returns:
instance : Access to Streams instance, used for submitting and rendering views.<|endoftext|> |
f50b5d19d1a7b75282e01785b2796ba69b569be752826c93b954c4fb8a1b6118 | def make_response(success, http_status, data=None, message=None):
'Return formatted json response.'
json_result = jsonify({'success': success, 'message': message, 'data': data})
return (json_result, http_status) | Return formatted json response. | server/app/utils/response.py | make_response | PetrushynskyiOleksii/peliculas | 0 | python | def make_response(success, http_status, data=None, message=None):
json_result = jsonify({'success': success, 'message': message, 'data': data})
return (json_result, http_status) | def make_response(success, http_status, data=None, message=None):
json_result = jsonify({'success': success, 'message': message, 'data': data})
return (json_result, http_status)<|docstring|>Return formatted json response.<|endoftext|> |
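A hypothetical Flask view built on `make_response` above; the `app` object and the `MOVIES` lookup table are assumptions for illustration, not part of the source:

```python
@app.route('/movies/<int:movie_id>')
def get_movie(movie_id):
    movie = MOVIES.get(movie_id)  # hypothetical in-memory lookup
    if movie is None:
        # the (json body, status) tuple is returned straight to Flask
        return make_response(False, 404, message='Movie not found.')
    return make_response(True, 200, data=movie)
```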
4950a10e6b777b3b2ea821ddf730ce1a0690a4c3fb70c080f67fbaec936fc3c0 | def init(empty=False):
' (re-)initializes the platform with devices. '
global DEVICES
DEVICES = ([] if empty else [MockToggleDevice('AC', STATE_ON), MockToggleDevice('AC', STATE_OFF), MockToggleDevice(None, STATE_OFF)]) | (re-)initializes the platform with devices. | tests/config/custom_components/switch/test.py | init | evancohen/home-assistant | 14 | python | def init(empty=False):
' '
global DEVICES
DEVICES = ([] if empty else [MockToggleDevice('AC', STATE_ON), MockToggleDevice('AC', STATE_OFF), MockToggleDevice(None, STATE_OFF)]) | def init(empty=False):
' '
global DEVICES
DEVICES = ([] if empty else [MockToggleDevice('AC', STATE_ON), MockToggleDevice('AC', STATE_OFF), MockToggleDevice(None, STATE_OFF)])<|docstring|>(re-)initializes the platform with devices.<|endoftext|> |
b4daf467b0d645b64dadfd2628e856aaad3598c8d2430498f95152e5209cef2f | def setup_platform(hass, config, add_devices_callback, discovery_info=None):
' Find and return test switches. '
add_devices_callback(DEVICES) | Find and return test switches. | tests/config/custom_components/switch/test.py | setup_platform | evancohen/home-assistant | 14 | python | def setup_platform(hass, config, add_devices_callback, discovery_info=None):
' '
add_devices_callback(DEVICES) | def setup_platform(hass, config, add_devices_callback, discovery_info=None):
' '
add_devices_callback(DEVICES)<|docstring|>Find and return test switches.<|endoftext|> |
724d1538b564fc16759e3838676df256e9686bd20347e4fb67292b64f97b27f5 | def make_table(rows: List[List[Any]], labels: Optional[List[Any]]=None, centered: bool=False) -> str:
'\n :param rows: 2D list containing objects that have a single-line representation (via `str`).\n All rows must be of the same length.\n :param labels: List containing the column labels. If present, the length must equal to that of each row.\n :param centered: If the items should be aligned to the center, else they are left aligned.\n :return: A table representing the rows passed in.\n '
row_length = None
if (not labels):
cols = 1
table_data = ''
for item in rows:
if (row_length is None):
row_length = len(str(item))
elif (len(str(item)) > row_length):
row_length = len(str(item))
for item in rows:
row = f'''β{str(item[(cols - 1)]).center(row_length)}β
'''
table_data = (table_data + row)
top_row_border = 'β{}β\n'.format(''.join([('β' * row_length)]))
bottom_row_border = 'β{}β'.format(''.join([('β' * row_length)]))
table = ((top_row_border + table_data) + bottom_row_border)
else:
cols = len(labels)
"\n str_list_data = str(list_data)[1:-1]\n str_list_data = str(str_list_data).replace(',|', ' ')\n table = (str_list_data)\n "
return table | :param rows: 2D list containing objects that have a single-line representation (via `str`).
All rows must be of the same length.
:param labels: List containing the column labels. If present, the length must equal to that of each row.
:param centered: If the items should be aligned to the center, else they are left aligned.
:return: A table representing the rows passed in. | qualifier/qualifier.py | make_table | chrisd149/cj8-qualifier | 0 | python | def make_table(rows: List[List[Any]], labels: Optional[List[Any]]=None, centered: bool=False) -> str:
'\n :param rows: 2D list containing objects that have a single-line representation (via `str`).\n All rows must be of the same length.\n :param labels: List containing the column labels. If present, the length must equal to that of each row.\n :param centered: If the items should be aligned to the center, else they are left aligned.\n :return: A table representing the rows passed in.\n '
row_length = None
if (not labels):
cols = 1
table_data = ''
for item in rows:
if (row_length is None):
row_length = len(str(item))
elif (len(str(item)) > row_length):
row_length = len(str(item))
for item in rows:
row = f'''β{str(item[(cols - 1)]).center(row_length)}β
'''
table_data = (table_data + row)
top_row_border = 'β{}β\n'.format(''.join([('β' * row_length)]))
bottom_row_border = 'β{}β'.format(''.join([('β' * row_length)]))
table = ((top_row_border + table_data) + bottom_row_border)
else:
cols = len(labels)
"\n str_list_data = str(list_data)[1:-1]\n str_list_data = str(str_list_data).replace(',|', ' ')\n table = (str_list_data)\n "
return table | def make_table(rows: List[List[Any]], labels: Optional[List[Any]]=None, centered: bool=False) -> str:
'\n :param rows: 2D list containing objects that have a single-line representation (via `str`).\n All rows must be of the same length.\n :param labels: List containing the column labels. If present, the length must equal to that of each row.\n :param centered: If the items should be aligned to the center, else they are left aligned.\n :return: A table representing the rows passed in.\n '
row_length = None
if (not labels):
cols = 1
table_data = ''
for item in rows:
if (row_length is None):
row_length = len(str(item))
elif (len(str(item)) > row_length):
row_length = len(str(item))
for item in rows:
row = f'''β{str(item[(cols - 1)]).center(row_length)}β
'''
table_data = (table_data + row)
top_row_border = 'β{}β\n'.format(''.join([('β' * row_length)]))
bottom_row_border = 'β{}β'.format(''.join([('β' * row_length)]))
table = ((top_row_border + table_data) + bottom_row_border)
else:
cols = len(labels)
"\n str_list_data = str(list_data)[1:-1]\n str_list_data = str(str_list_data).replace(',|', ' ')\n table = (str_list_data)\n "
return table<|docstring|>:param rows: 2D list containing objects that have a single-line representation (via `str`).
All rows must be of the same length.
:param labels: List containing the column labels. If present, the length must equal to that of each row.
:param centered: If the items should be aligned to the center, else they are left aligned.
:return: A table representing the rows passed in.<|endoftext|> |
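An illustrative call to the single-column branch of `make_table` above (`labels=None`). Note that the column width is derived from `str(item)` of each whole row, i.e. the list repr, so the box comes out slightly wider than the longest value itself:

```python
rows = [['Apple'], ['Banana'], ['Cherry']]
print(make_table(rows))
# ββββββββββββ
# β  Apple   β
# β  Banana  β
# β  Cherry  β
# ββββββββββββ
```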
cc6fc32ae8fc9935823d69be195c3b151e162fdfbfd9606103a0b49a4e5ededa | def setUp(self):
'\n Set up unit test\n '
self.install = InstallCfg()
self.r_velocity = 'nr02-vs500_lf.vel'
self.r_stations = 'one_stat.txt'
self.r_src = 'test_wh_ucsb.src'
self.r_srf = 'test_ucsb.srf'
self.sim_id = int(seqnum.get_seq_num())
a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
a_logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))
bband_utils.mkdirs([a_indir, a_tmpdir, a_outdir, a_logdir], print_cmd=False)
a_refdir = os.path.join(self.install.A_TEST_REF_DIR, 'ucsb')
shutil.copy2(os.path.join(a_refdir, self.r_stations), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_velocity), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_src), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_srf), a_indir)
slo = StationList(os.path.join(a_indir, self.r_stations))
site_list = slo.getStationList()
for site in site_list:
shutil.copy2(os.path.join(a_refdir, ('%s.3comp' % site.scode)), a_tmpdir)
os.chdir(a_tmpdir) | Set up unit test | bbp/tests/test_uc_site.py | setUp | ZhangHCFJEA/bbp | 28 | python | def setUp(self):
'\n \n '
self.install = InstallCfg()
self.r_velocity = 'nr02-vs500_lf.vel'
self.r_stations = 'one_stat.txt'
self.r_src = 'test_wh_ucsb.src'
self.r_srf = 'test_ucsb.srf'
self.sim_id = int(seqnum.get_seq_num())
a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
a_logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))
bband_utils.mkdirs([a_indir, a_tmpdir, a_outdir, a_logdir], print_cmd=False)
a_refdir = os.path.join(self.install.A_TEST_REF_DIR, 'ucsb')
shutil.copy2(os.path.join(a_refdir, self.r_stations), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_velocity), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_src), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_srf), a_indir)
slo = StationList(os.path.join(a_indir, self.r_stations))
site_list = slo.getStationList()
for site in site_list:
shutil.copy2(os.path.join(a_refdir, ('%s.3comp' % site.scode)), a_tmpdir)
os.chdir(a_tmpdir) | def setUp(self):
'\n \n '
self.install = InstallCfg()
self.r_velocity = 'nr02-vs500_lf.vel'
self.r_stations = 'one_stat.txt'
self.r_src = 'test_wh_ucsb.src'
self.r_srf = 'test_ucsb.srf'
self.sim_id = int(seqnum.get_seq_num())
a_indir = os.path.join(self.install.A_IN_DATA_DIR, str(self.sim_id))
a_tmpdir = os.path.join(self.install.A_TMP_DATA_DIR, str(self.sim_id))
a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
a_logdir = os.path.join(self.install.A_OUT_LOG_DIR, str(self.sim_id))
bband_utils.mkdirs([a_indir, a_tmpdir, a_outdir, a_logdir], print_cmd=False)
a_refdir = os.path.join(self.install.A_TEST_REF_DIR, 'ucsb')
shutil.copy2(os.path.join(a_refdir, self.r_stations), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_velocity), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_src), a_indir)
shutil.copy2(os.path.join(a_refdir, self.r_srf), a_indir)
slo = StationList(os.path.join(a_indir, self.r_stations))
site_list = slo.getStationList()
for site in site_list:
shutil.copy2(os.path.join(a_refdir, ('%s.3comp' % site.scode)), a_tmpdir)
os.chdir(a_tmpdir)<|docstring|>Set up unit test<|endoftext|> |
60234349b4479311cb8e331c5e4d8dd1c41e24c9e0687362e59c2d4a9cfbcdb8 | def psd(wave, rate=None, units='decibels', scaling='density', kind='spectrogram', window_length=1024, window_overlap=50, window_shape='hann', pressure_reference=20.0):
"\n Estimate the power spectral density (psd) of a wave\n \n Parameters\n ----------\n wave: Wave object, file path to a WAV file, or numpy array of WAV signal samples\n \n rate: sample rate of signal, default = None\n required when 'wave' is a numpy array\n if 'None', the rate will be determined by the 'wave' object\n \n units: string, default = 'decibels'\n result units in 'decibels' or 'watts'\n \n scaling: string, default = 'density'\n result scaling, 'spectrum' or 'density'\n \n kind: string, default = 'spectrogram'\n result type, 'spectrogram', 'mean', or 'both'\n \n window_length: integer, default = 1000\n length of analysis window in number of samples\n \n window_overlap: integer, default = 50\n amount of analysis window overlap in percent\n \n window_shape: string, default = 'hann'\n shape of analysis window,\n refer to scipy.signal for window types\n \n pressure_reference: float, default = 20.\n reference pressure for measurements in air in micropascals\n "
if (type(wave) is not Wave):
wave = Wave(wave)
if (not hasattr(wave, 'samples')):
wave.read()
if (rate is None):
rate = wave.rate
if (units not in ['decibels', 'watts']):
raise ValueError("'{0}' are not acceptable units".format(units))
if (kind not in ['spectrogram', 'mean', 'both']):
raise ValueError("'{0}' is not an acceptable kind".format(kind))
window_overlap = (window_overlap / 100.0)
n_windows = int((np.ceil((wave.n_samples - (window_overlap * window_length))) / ((1 - window_overlap) * window_length)))
psd = np.array([np.empty(shape=(int(((window_length / 2) + 1)), n_windows)) for channel in wave.channels])
for channel in wave.channels:
(f, t, psd[channel]) = spectrogram(wave.samples[:, channel], fs=rate, window=window_shape, nperseg=window_length, noverlap=(window_length * window_overlap), return_onesided=True, scaling=scaling)
if (kind in ['mean', 'both']):
psd_mean = (psd.sum(axis=2) / psd.shape[2])
if (units == 'decibels'):
if (kind == 'mean'):
return (f, t, (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
elif (kind == 'both'):
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))), (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
else:
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))))
elif (kind == 'mean'):
return (f, t, psd_mean)
elif (kind == 'both'):
return (f, t, psd, psd_mean)
else:
return (f, t, psd) | Estimate the power spectral density (psd) of a wave
Parameters
----------
wave: Wave object, file path to a WAV file, or numpy array of WAV signal samples
rate: sample rate of signal, default = None
required when 'wave' is a numpy array
if 'None', the rate will be determined by the 'wave' object
units: string, default = 'decibels'
result units in 'decibels' or 'watts'
scaling: string, default = 'density'
result scaling, 'spectrum' or 'density'
kind: string, default = 'spectrogram'
result type, 'spectrogram', 'mean', or 'both'
window_length: integer, default = 1024
length of analysis window in number of samples
window_overlap: integer, default = 50
amount of analysis window overlap in percent
window_shape: string, default = 'hann'
shape of analysis window,
refer to scipy.signal for window types
pressure_reference: float, default = 20.
reference pressure for measurements in air in micropascals | nacoustik/spectrum/analysis.py | psd | jacobdein/wavescape | 0 | python | def psd(wave, rate=None, units='decibels', scaling='density', kind='spectrogram', window_length=1024, window_overlap=50, window_shape='hann', pressure_reference=20.0):
"\n Estimate the power spectral density (psd) of a wave\n \n Parameters\n ----------\n wave: Wave object, file path to a WAV file, or numpy array of WAV signal samples\n \n rate: sample rate of signal, default = None\n required when 'wave' is a numpy array\n if 'None', the rate will be determined by the 'wave' object\n \n units: string, default = 'decibels'\n result units in 'decibels' or 'watts'\n \n scaling: string, default = 'density'\n result scaling, 'spectrum' or 'density'\n \n kind: string, default = 'spectrogram'\n result type, 'spectrogram', 'mean', or 'both'\n \n window_length: integer, default = 1000\n length of analysis window in number of samples\n \n window_overlap: integer, default = 50\n amount of analysis window overlap in percent\n \n window_shape: string, default = 'hann'\n shape of analysis window,\n refer to scipy.signal for window types\n \n pressure_reference: float, default = 20.\n reference pressure for measurements in air in micropascals\n "
if (type(wave) is not Wave):
wave = Wave(wave)
if (not hasattr(wave, 'samples')):
wave.read()
if (rate is None):
rate = wave.rate
if (units not in ['decibels', 'watts']):
raise ValueError("'{0}' are not acceptable units".format(units))
if (kind not in ['spectrogram', 'mean', 'both']):
raise ValueError("'{0}' is not an acceptable kind".format(kind))
window_overlap = (window_overlap / 100.0)
n_windows = int((np.ceil((wave.n_samples - (window_overlap * window_length))) / ((1 - window_overlap) * window_length)))
psd = np.array([np.empty(shape=(int(((window_length / 2) + 1)), n_windows)) for channel in wave.channels])
for channel in wave.channels:
(f, t, psd[channel]) = spectrogram(wave.samples[:, channel], fs=rate, window=window_shape, nperseg=window_length, noverlap=(window_length * window_overlap), return_onesided=True, scaling=scaling)
if (kind in ['mean', 'both']):
psd_mean = (psd.sum(axis=2) / psd.shape[2])
if (units == 'decibels'):
if (kind == 'mean'):
return (f, t, (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
elif (kind == 'both'):
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))), (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
else:
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))))
elif (kind == 'mean'):
return (f, t, psd_mean)
elif (kind == 'both'):
return (f, t, psd, psd_mean)
else:
return (f, t, psd) | def psd(wave, rate=None, units='decibels', scaling='density', kind='spectrogram', window_length=1024, window_overlap=50, window_shape='hann', pressure_reference=20.0):
"\n Estimate the power spectral density (psd) of a wave\n \n Parameters\n ----------\n wave: Wave object, file path to a WAV file, or numpy array of WAV signal samples\n \n rate: sample rate of signal, default = None\n required when 'wave' is a numpy array\n if 'None', the rate will be determined by the 'wave' object\n \n units: string, default = 'decibels'\n result units in 'decibels' or 'watts'\n \n scaling: string, default = 'density'\n result scaling, 'spectrum' or 'density'\n \n kind: string, default = 'spectrogram'\n result type, 'spectrogram', 'mean', or 'both'\n \n window_length: integer, default = 1000\n length of analysis window in number of samples\n \n window_overlap: integer, default = 50\n amount of analysis window overlap in percent\n \n window_shape: string, default = 'hann'\n shape of analysis window,\n refer to scipy.signal for window types\n \n pressure_reference: float, default = 20.\n reference pressure for measurements in air in micropascals\n "
if (type(wave) is not Wave):
wave = Wave(wave)
if (not hasattr(wave, 'samples')):
wave.read()
if (rate is None):
rate = wave.rate
if (units not in ['decibels', 'watts']):
raise ValueError("'{0}' are not acceptable units".format(units))
if (kind not in ['spectrogram', 'mean', 'both']):
raise ValueError("'{0}' is not an acceptable kind".format(kind))
window_overlap = (window_overlap / 100.0)
n_windows = int((np.ceil((wave.n_samples - (window_overlap * window_length))) / ((1 - window_overlap) * window_length)))
psd = np.array([np.empty(shape=(int(((window_length / 2) + 1)), n_windows)) for channel in wave.channels])
for channel in wave.channels:
(f, t, psd[channel]) = spectrogram(wave.samples[:, channel], fs=rate, window=window_shape, nperseg=window_length, noverlap=(window_length * window_overlap), return_onesided=True, scaling=scaling)
if (kind in ['mean', 'both']):
psd_mean = (psd.sum(axis=2) / psd.shape[2])
if (units == 'decibels'):
if (kind == 'mean'):
return (f, t, (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
elif (kind == 'both'):
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))), (10 * np.log10((psd_mean / (pressure_reference ** 2)))))
else:
return (f, t, (10 * np.log10((psd / (pressure_reference ** 2)))))
elif (kind == 'mean'):
return (f, t, psd_mean)
elif (kind == 'both'):
return (f, t, psd, psd_mean)
else:
return (f, t, psd)<|docstring|>Estimate the power spectral density (psd) of a wave
Parameters
----------
wave: Wave object, file path to a WAV file, or numpy array of WAV signal samples
rate: sample rate of signal, default = None
required when 'wave' is a numpy array
if 'None', the rate will be determined by the 'wave' object
units: string, default = 'decibels'
result units in 'decibels' or 'watts'
scaling: string, default = 'density'
result scaling, 'spectrum' or 'density'
kind: string, default = 'spectrogram'
result type, 'spectrogram', 'mean', or 'both'
window_length: integer, default = 1000
length of analysis window in number of samples
window_overlap: integer, default = 50
amount of analysis window overlap in percent
window_shape: string, default = 'hann'
shape of analysis window,
refer to scipy.signal for window types
pressure_reference: float, default = 20.
reference pressure for measurements in air in micropascals<|endoftext|> |
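The decibel step in `psd` above references power values to the squared reference pressure, dB = 10 * log10(P / p_ref^2). A quick numeric check of that conversion (the power values are arbitrary examples):

```python
import numpy as np

pressure_reference = 20.0          # micropascals, reference for air
power = np.array([4e2, 4e4, 4e6])  # arbitrary example power values
db = 10 * np.log10(power / pressure_reference**2)
print(db)  # [ 0. 20. 40.]
```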
97ea1a7d8ca0b3e5f7794bd34b2b94d2313c52308e667e321587a4999f88ff59 | def sel(a, rate, duration, b=None, limit=2000, bin_width=1000, return_bins=False):
'\n Estimate the sound exposure level (sel) per minute from a wave\n \n Parameters\n ----------\n a: numpy float64 array, required\n a 3d array (channels, frequency bands, time steps)\n representing the psd (power spectral density)\n spectrogram of a wave signal in decibels\n \n rate: sample rate of signal, required\n \n duration: duration of signal in minutes, required\n \n b: numpy float64 array, default = None\n a 3d array (channels, frequency bands, time steps)\n representing the spectrogram of a wave signal (with ale applied)\n \n limit: int, default = 2000\n frequency separating anthrophony and biophony\n \n bin_width: int, default = 1000\n width of frequency bins in hertz\n \n return_bins: boolean, default = False\n return values for each frequency bin of specified bin_width\n\n '
a = (10 ** (a / 10))
f_delta = ((rate / 2) / (a.shape[1] - 1))
a = (a * f_delta)
if (b is None):
f = np.arange(0, ((rate / 2) + f_delta), f_delta)
bins = np.arange(0, (rate / 2), bin_width)
bin_bound_indicies = np.searchsorted(f, bins)
bin_bound_indicies = np.append(bin_bound_indicies, a.shape[1])
sel = np.empty(len(bins))
for i in (bins / bin_width).astype(int):
low_bound = bin_bound_indicies[i]
high_bound = bin_bound_indicies[(i + 1)]
sel[i] = a[:, low_bound:high_bound, :].sum()
sel = (sel / duration)
anthrophony = sel[0:2].sum()
biophony = sel[2:10].sum()
else:
return_bins = False
b = (10 ** (b / 10))
b = (b * f_delta)
anthrophony = ((a - (b.data * np.invert(b.mask))).sum() / duration)
biophony = (b.sum() / duration)
anthrophony = (10 * np.log10(anthrophony))
biophony = (10 * np.log10(biophony))
if (return_bins is True):
sel = (10 * np.log10(sel))
return (sel, anthrophony, biophony)
else:
return (anthrophony, biophony) | Estimate the sound exposure level (sel) per minute from a wave
Parameters
----------
a: numpy float64 array, required
a 3d array (channels, frequency bands, time steps)
representing the psd (power spectral density)
spectrogram of a wave signal in decibels
rate: sample rate of signal, required
duration: duration of signal in minutes, required
b: numpy float64 array, default = None
a 3d array (channels, frequency bands, time steps)
representing the spectrogram of a wave signal (with ale applied)
limit: int, default = 2000
frequency separating anthrophony and biophony
bin_width: int, default = 1000
width of frequency bins in hertz
return_bins: boolean, default = False
return values for each frequency bin of specified bin_width | nacoustik/spectrum/analysis.py | sel | jacobdein/wavescape | 0 | python | def sel(a, rate, duration, b=None, limit=2000, bin_width=1000, return_bins=False):
'\n Estimate the sound exposure level (sel) per minute from a wave\n \n Parameters\n ----------\n a: numpy float64 array, required\n a 3d array (channels, frequency bands, time steps)\n representing the psd (power spectral density)\n spectrogram of a wave signal in decibels\n \n rate: sample rate of signal, required\n \n duration: duration of signal in minutes, required\n \n b: numpy float64 array, default = None\n a 3d array (channels, frequency bands, time steps)\n representing the spectrogram of a wave signal (with ale applied)\n \n limit: int, default = 2000\n frequency separating anthrophony and biophony\n \n bin_width: int, default = 1000\n width of frequency bins in hertz\n \n return_bins: boolean, default = False\n return values for each frequency bin of specified bin_width\n\n '
a = (10 ** (a / 10))
f_delta = ((rate / 2) / (a.shape[1] - 1))
a = (a * f_delta)
if (b is None):
f = np.arange(0, ((rate / 2) + f_delta), f_delta)
bins = np.arange(0, (rate / 2), bin_width)
bin_bound_indicies = np.searchsorted(f, bins)
bin_bound_indicies = np.append(bin_bound_indicies, a.shape[1])
sel = np.empty(len(bins))
for i in (bins / bin_width).astype(int):
low_bound = bin_bound_indicies[i]
high_bound = bin_bound_indicies[(i + 1)]
sel[i] = a[:, low_bound:high_bound, :].sum()
sel = (sel / duration)
anthrophony = sel[0:2].sum()
biophony = sel[2:10].sum()
else:
return_bins = False
b = (10 ** (b / 10))
b = (b * f_delta)
anthrophony = ((a - (b.data * np.invert(b.mask))).sum() / duration)
biophony = (b.sum() / duration)
anthrophony = (10 * np.log10(anthrophony))
biophony = (10 * np.log10(biophony))
if (return_bins is True):
sel = (10 * np.log10(sel))
return (sel, anthrophony, biophony)
else:
return (anthrophony, biophony) | def sel(a, rate, duration, b=None, limit=2000, bin_width=1000, return_bins=False):
'\n Estimate the sound exposure level (sel) per minute from a wave\n \n Parameters\n ----------\n a: numpy float64 array, required\n a 3d array (channels, frequency bands, time steps)\n representing the psd (power spectral density)\n spectrogram of a wave signal in decibels\n \n rate: sample rate of signal, required\n \n duration: duration of signal in minutes, required\n \n b: numpy float64 array, default = None\n a 3d array (channels, frequency bands, time steps)\n representing the spectrogram of a wave signal (with ale applied)\n \n limit: int, default = 2000\n frequency separating anthrophony and biophony\n \n bin_width: int, default = 1000\n width of frequency bins in hertz\n \n return_bins: boolean, default = False\n return values for each frequency bin of specified bin_width\n\n '
a = (10 ** (a / 10))
f_delta = ((rate / 2) / (a.shape[1] - 1))
a = (a * f_delta)
if (b is None):
f = np.arange(0, ((rate / 2) + f_delta), f_delta)
bins = np.arange(0, (rate / 2), bin_width)
bin_bound_indicies = np.searchsorted(f, bins)
bin_bound_indicies = np.append(bin_bound_indicies, a.shape[1])
sel = np.empty(len(bins))
for i in (bins / bin_width).astype(int):
low_bound = bin_bound_indicies[i]
high_bound = bin_bound_indicies[(i + 1)]
sel[i] = a[:, low_bound:high_bound, :].sum()
sel = (sel / duration)
anthrophony = sel[0:2].sum()
biophony = sel[2:10].sum()
else:
return_bins = False
b = (10 ** (b / 10))
b = (b * f_delta)
anthrophony = ((a - (b.data * np.invert(b.mask))).sum() / duration)
biophony = (b.sum() / duration)
anthrophony = (10 * np.log10(anthrophony))
biophony = (10 * np.log10(biophony))
if (return_bins is True):
sel = (10 * np.log10(sel))
return (sel, anthrophony, biophony)
else:
return (anthrophony, biophony)<|docstring|>Estimate the sound exposure level (sel) per minute from a wave
Parameters
----------
a: numpy float64 array, required
a 3d array (channels, frequency bands, time steps)
representing the psd (power spectral density)
spectrogram of a wave signal in decibels
rate: sample rate of signal, required
duration: duration of signal in minutes, required
b: numpy float64 array, default = None
a 3d array (channels, frequency bands, time steps)
representing the spectrogram of a wave signal (with ale applied)
limit: int, default = 2000
frequency separating anthrophony and biophony
bin_width: int, default = 1000
width of frequency bins in hertz
return_bins: boolean, default = False
return values for each frequency bin of specified bin_width<|endoftext|> |
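`sel` above assigns spectrogram rows to 1 kHz bands with `np.searchsorted` on the frequency axis. A sketch of that binning step under the same assumptions (one-sided spectrum with evenly spaced frequencies):

```python
import numpy as np

rate, n_bands = 44100, 513             # e.g. a 1024-point window
f = np.linspace(0, rate / 2, n_bands)  # frequency of each spectrogram row
bins = np.arange(0, rate / 2, 1000)    # 1 kHz band lower bounds
bounds = np.append(np.searchsorted(f, bins), n_bands)
# rows bounds[i]:bounds[i+1] belong to band i; sel() then sums power in
# bands 0-1 (0-2 kHz) as anthrophony and bands 2-9 (2-10 kHz) as biophony
print(bounds[:4])  # [ 0 24 47 70]
```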
c1f6a10ab2140078c9c7bc5fa8d50e317a70dec189426587b54da51c97e41b64 | def __init__(__self__, *, user: pulumi.Input[str], pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None):
'\n The set of arguments for constructing a AccessKey resource.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
pulumi.set(__self__, 'user', user)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (status is not None):
pulumi.set(__self__, 'status', status) | The set of arguments for constructing a AccessKey resource.
:param pulumi.Input[str] user: IAM user to associate with this access key.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`. | sdk/python/pulumi_aws/iam/access_key.py | __init__ | rapzo/pulumi-aws | 260 | python | def __init__(__self__, *, user: pulumi.Input[str], pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None):
'\n The set of arguments for constructing a AccessKey resource.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
pulumi.set(__self__, 'user', user)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (status is not None):
pulumi.set(__self__, 'status', status) | def __init__(__self__, *, user: pulumi.Input[str], pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None):
'\n The set of arguments for constructing a AccessKey resource.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
pulumi.set(__self__, 'user', user)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (status is not None):
pulumi.set(__self__, 'status', status)<|docstring|>The set of arguments for constructing a AccessKey resource.
:param pulumi.Input[str] user: IAM user to associate with this access key.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.<|endoftext|> |
9782a684b3044a623f4920950b034bb79a9fa8783d5097e6735e6c2f309c4aad | @property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
'\n IAM user to associate with this access key.\n '
return pulumi.get(self, 'user') | IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | user | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'user') | @property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'user')<|docstring|>IAM user to associate with this access key.<|endoftext|> |
c78fe070d9714d3f4992838b5cf041dff6e1b69882576d35ea0b6d573ad171a6 | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n '
return pulumi.get(self, 'pgp_key') | Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute. | sdk/python/pulumi_aws/iam/access_key.py | pgp_key | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key') | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key')<|docstring|>Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.<|endoftext|> |
4d9325eb6593746375c0418566825284d0e7655b5ca752a464ec5f116cf125b7 | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
return pulumi.get(self, 'status') | Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`. | sdk/python/pulumi_aws/iam/access_key.py | status | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'status') | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'status')<|docstring|>Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.<|endoftext|> |
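`status` is the one mutable knob on an access key, so a key can be disabled without deleting it. A hedged sketch, assuming an existing IAM user name (all identifiers here are placeholders):

```python
import pulumi_aws as aws

# Flipping status to Inactive keeps the key pair in IAM but rejects
# authentication with it -- useful mid-rotation.
old_key = aws.iam.AccessKey(
    "oldKey",
    user="legacy-user",  # assumption: an IAM user with this name exists
    status="Inactive",
)
```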
5a196de304c53603f9c6d0ff566b75a2949cb8b1ef093bd1dfe5daed16b5e88a | def __init__(__self__, *, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering AccessKey resources.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
if (create_date is not None):
pulumi.set(__self__, 'create_date', create_date)
if (encrypted_secret is not None):
pulumi.set(__self__, 'encrypted_secret', encrypted_secret)
if (encrypted_ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'encrypted_ses_smtp_password_v4', encrypted_ses_smtp_password_v4)
if (key_fingerprint is not None):
pulumi.set(__self__, 'key_fingerprint', key_fingerprint)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (secret is not None):
pulumi.set(__self__, 'secret', secret)
if (ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'ses_smtp_password_v4', ses_smtp_password_v4)
if (status is not None):
pulumi.set(__self__, 'status', status)
if (user is not None):
pulumi.set(__self__, 'user', user) | Input properties used for looking up and filtering AccessKey resources.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | __init__ | rapzo/pulumi-aws | 260 | python | def __init__(__self__, *, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering AccessKey resources.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
if (create_date is not None):
pulumi.set(__self__, 'create_date', create_date)
if (encrypted_secret is not None):
pulumi.set(__self__, 'encrypted_secret', encrypted_secret)
if (encrypted_ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'encrypted_ses_smtp_password_v4', encrypted_ses_smtp_password_v4)
if (key_fingerprint is not None):
pulumi.set(__self__, 'key_fingerprint', key_fingerprint)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (secret is not None):
pulumi.set(__self__, 'secret', secret)
if (ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'ses_smtp_password_v4', ses_smtp_password_v4)
if (status is not None):
pulumi.set(__self__, 'status', status)
if (user is not None):
pulumi.set(__self__, 'user', user) | def __init__(__self__, *, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None):
"\n Input properties used for looking up and filtering AccessKey resources.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
if (create_date is not None):
pulumi.set(__self__, 'create_date', create_date)
if (encrypted_secret is not None):
pulumi.set(__self__, 'encrypted_secret', encrypted_secret)
if (encrypted_ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'encrypted_ses_smtp_password_v4', encrypted_ses_smtp_password_v4)
if (key_fingerprint is not None):
pulumi.set(__self__, 'key_fingerprint', key_fingerprint)
if (pgp_key is not None):
pulumi.set(__self__, 'pgp_key', pgp_key)
if (secret is not None):
pulumi.set(__self__, 'secret', secret)
if (ses_smtp_password_v4 is not None):
pulumi.set(__self__, 'ses_smtp_password_v4', ses_smtp_password_v4)
if (status is not None):
pulumi.set(__self__, 'status', status)
if (user is not None):
pulumi.set(__self__, 'user', user)<|docstring|>Input properties used for looking up and filtering AccessKey resources.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.<|endoftext|> |
226d1528246040f42ad8aa7352af31727787727d1f0a39993c6973ca804ff1a8 | @property
@pulumi.getter(name='createDate')
def create_date(self) -> Optional[pulumi.Input[str]]:
'\n Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n '
return pulumi.get(self, 'create_date') | Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created. | sdk/python/pulumi_aws/iam/access_key.py | create_date | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='createDate')
def create_date(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'create_date') | @property
@pulumi.getter(name='createDate')
def create_date(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'create_date')<|docstring|>Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.<|endoftext|> |
7955bb670b5d715881ca41655456459121b5fe04347d43a1321181156a2411d4 | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> Optional[pulumi.Input[str]]:
'\n Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n '
return pulumi.get(self, 'key_fingerprint') | Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources. | sdk/python/pulumi_aws/iam/access_key.py | key_fingerprint | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'key_fingerprint') | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'key_fingerprint')<|docstring|>Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.<|endoftext|> |
c78fe070d9714d3f4992838b5cf041dff6e1b69882576d35ea0b6d573ad171a6 | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n '
return pulumi.get(self, 'pgp_key') | Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute. | sdk/python/pulumi_aws/iam/access_key.py | pgp_key | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key') | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key')<|docstring|>Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.<|endoftext|> |
2e19d42886a31c1f517fee76f2ae7ab36f3abde4dc41490059ed5c88f14c7d4c | @property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input[str]]:
'\n Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n '
return pulumi.get(self, 'secret') | Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation. | sdk/python/pulumi_aws/iam/access_key.py | secret | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'secret') | @property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'secret')<|docstring|>Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.<|endoftext|> |
1f634e2e1d27921844a269ca54cbd13899d16947c0f1b4dd7701078b99160be0 | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> Optional[pulumi.Input[str]]:
"\n Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n "
return pulumi.get(self, 'ses_smtp_password_v4') | Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region). | sdk/python/pulumi_aws/iam/access_key.py | ses_smtp_password_v4 | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> Optional[pulumi.Input[str]]:
"\n \n "
return pulumi.get(self, 'ses_smtp_password_v4') | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> Optional[pulumi.Input[str]]:
"\n \n "
return pulumi.get(self, 'ses_smtp_password_v4')<|docstring|>Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).<|endoftext|> |
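The provider derives this value with AWS's documented SigV4 conversion. A standalone sketch of that algorithm, written from the linked AWS docs rather than this module's internals, for reference:

```python
import base64
import hashlib
import hmac

def ses_smtp_password_v4(secret_access_key: str, region: str) -> str:
    """Derive an SES SMTP password from an IAM secret key (SigV4, version byte 0x04)."""
    date, service, terminal, message = "11111111", "ses", "aws4_request", "SendRawEmail"

    def sign(key: bytes, msg: str) -> bytes:
        return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

    # Chained HMAC-SHA256 over the fixed date, region, service, terminal, and message.
    sig = sign(("AWS4" + secret_access_key).encode("utf-8"), date)
    for part in (region, service, terminal, message):
        sig = sign(sig, part)
    # Prefix the version byte, then base64-encode the result.
    return base64.b64encode(b"\x04" + sig).decode("utf-8")
```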
4d9325eb6593746375c0418566825284d0e7655b5ca752a464ec5f116cf125b7 | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
return pulumi.get(self, 'status') | Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`. | sdk/python/pulumi_aws/iam/access_key.py | status | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'status') | @property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'status')<|docstring|>Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.<|endoftext|> |
4051bdf2d60a4dd51e6b8ea7a96a7a61712ba423a72baf54bb2fb69974b39906 | @property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
'\n IAM user to associate with this access key.\n '
return pulumi.get(self, 'user') | IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | user | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'user') | @property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'user')<|docstring|>IAM user to associate with this access key.<|endoftext|> |
599c4c49a417808e800b6583e63bc95fad7b34cff6d7c466f2f2e8ab96bbaded | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n '
... | Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
""")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | __init__ | rapzo/pulumi-aws | 260 | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n '
... | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, pgp_key: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n '
...<|docstring|>Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
""")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.<|endoftext|> |
6f1b807c5e82ef648073b52af526baba5033cc5c65d5e0444aaf63def148a7ef | @overload
def __init__(__self__, resource_name: str, args: AccessKeyArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param AccessKeyArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
... | Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
""")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param AccessKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_aws/iam/access_key.py | __init__ | rapzo/pulumi-aws | 260 | python | @overload
def __init__(__self__, resource_name: str, args: AccessKeyArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param AccessKeyArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
... | @overload
def __init__(__self__, resource_name: str, args: AccessKeyArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n lb_user = aws.iam.User("lbUser", path="/system/")\n lb_access_key = aws.iam.AccessKey("lbAccessKey",\n user=lb_user.name,\n pgp_key="keybase:some_person_that_exists")\n lb_ro = aws.iam.UserPolicy("lbRo",\n user=lb_user.name,\n policy="""{\n "Version": "2012-10-17",\n "Statement": [\n {\n "Action": [\n "ec2:Describe*"\n ],\n "Effect": "Allow",\n "Resource": "*"\n }\n ]\n }\n """)\n pulumi.export("secret", lb_access_key.encrypted_secret)\n ```\n\n ```python\n import pulumi\n import pulumi_aws as aws\n\n test_user = aws.iam.User("testUser", path="/test/")\n test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)\n pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)\n ```\n\n ## Import\n\n IAM Access Keys can be imported using the identifier, e.g.\n\n ```sh\n $ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890\n ```\n\n Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.\n\n :param str resource_name: The name of the resource.\n :param AccessKeyArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
...<|docstring|>Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy="""{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
""")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param AccessKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|> |
6f577ef2f4daa1aa9157fce7f9d2433b6a87120a32fb459fb945f658cd801c43 | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None) -> 'AccessKey':
"\n Get an existing AccessKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AccessKeyState.__new__(_AccessKeyState)
__props__.__dict__['create_date'] = create_date
__props__.__dict__['encrypted_secret'] = encrypted_secret
__props__.__dict__['encrypted_ses_smtp_password_v4'] = encrypted_ses_smtp_password_v4
__props__.__dict__['key_fingerprint'] = key_fingerprint
__props__.__dict__['pgp_key'] = pgp_key
__props__.__dict__['secret'] = secret
__props__.__dict__['ses_smtp_password_v4'] = ses_smtp_password_v4
__props__.__dict__['status'] = status
__props__.__dict__['user'] = user
return AccessKey(resource_name, opts=opts, __props__=__props__) | Get an existing AccessKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | get | rapzo/pulumi-aws | 260 | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None) -> 'AccessKey':
"\n Get an existing AccessKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AccessKeyState.__new__(_AccessKeyState)
__props__.__dict__['create_date'] = create_date
__props__.__dict__['encrypted_secret'] = encrypted_secret
__props__.__dict__['encrypted_ses_smtp_password_v4'] = encrypted_ses_smtp_password_v4
__props__.__dict__['key_fingerprint'] = key_fingerprint
__props__.__dict__['pgp_key'] = pgp_key
__props__.__dict__['secret'] = secret
__props__.__dict__['ses_smtp_password_v4'] = ses_smtp_password_v4
__props__.__dict__['status'] = status
__props__.__dict__['user'] = user
return AccessKey(resource_name, opts=opts, __props__=__props__) | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, create_date: Optional[pulumi.Input[str]]=None, encrypted_secret: Optional[pulumi.Input[str]]=None, encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, key_fingerprint: Optional[pulumi.Input[str]]=None, pgp_key: Optional[pulumi.Input[str]]=None, secret: Optional[pulumi.Input[str]]=None, ses_smtp_password_v4: Optional[pulumi.Input[str]]=None, status: Optional[pulumi.Input[str]]=None, user: Optional[pulumi.Input[str]]=None) -> 'AccessKey':
"\n Get an existing AccessKey resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n :param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n :param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n :param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n :param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n :param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n :param pulumi.Input[str] user: IAM user to associate with this access key.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AccessKeyState.__new__(_AccessKeyState)
__props__.__dict__['create_date'] = create_date
__props__.__dict__['encrypted_secret'] = encrypted_secret
__props__.__dict__['encrypted_ses_smtp_password_v4'] = encrypted_ses_smtp_password_v4
__props__.__dict__['key_fingerprint'] = key_fingerprint
__props__.__dict__['pgp_key'] = pgp_key
__props__.__dict__['secret'] = secret
__props__.__dict__['ses_smtp_password_v4'] = ses_smtp_password_v4
__props__.__dict__['status'] = status
__props__.__dict__['user'] = user
return AccessKey(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing AccessKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.<|endoftext|> |
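A minimal usage sketch of `get`, matching the signature above (the access key ID is a placeholder): adopting an existing key only reads state, and the secret-bearing attributes noted in the docstring remain unavailable.

```python
import pulumi_aws as aws

# Placeholder access key ID; get() looks up existing state and never
# creates or rotates anything.
imported = aws.iam.AccessKey.get("imported", id="AKIA1234567890")
```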
ab00855a75d29ba1eb12e2d6761b97c2be94130fe5f6a565ae28e1ddb518bb8f | @property
@pulumi.getter(name='createDate')
def create_date(self) -> pulumi.Output[str]:
'\n Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.\n '
return pulumi.get(self, 'create_date') | Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created. | sdk/python/pulumi_aws/iam/access_key.py | create_date | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='createDate')
def create_date(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'create_date') | @property
@pulumi.getter(name='createDate')
def create_date(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'create_date')<|docstring|>Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.<|endoftext|> |
a132383e957e9f76c61c23f0c7a549acf351c99a12e5279cb171442ec5bafc26 | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> pulumi.Output[str]:
'\n Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.\n '
return pulumi.get(self, 'key_fingerprint') | Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources. | sdk/python/pulumi_aws/iam/access_key.py | key_fingerprint | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'key_fingerprint') | @property
@pulumi.getter(name='keyFingerprint')
def key_fingerprint(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'key_fingerprint')<|docstring|>Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.<|endoftext|> |
551c1d6b7b09f5223cbfd99bf6063ebd533f6129d3797af010de4c25712e5dc9 | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> pulumi.Output[Optional[str]]:
'\n Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.\n '
return pulumi.get(self, 'pgp_key') | Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute. | sdk/python/pulumi_aws/iam/access_key.py | pgp_key | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key') | @property
@pulumi.getter(name='pgpKey')
def pgp_key(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'pgp_key')<|docstring|>Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.<|endoftext|> |
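A sketch of the PGP flow, assuming a keybase identity (the username is the docstring's own placeholder): only the encrypted secret ever reaches state, and decryption happens out of band.

```python
import pulumi
import pulumi_aws as aws

pgp_user = aws.iam.User("pgpUser")
pgp_access_key = aws.iam.AccessKey(
    "pgpKey",
    user=pgp_user.name,
    # Either form works: "keybase:<username>" or a raw base-64 PGP public key.
    pgp_key="keybase:some_person_that_exists",
)

# Decrypt offline, e.g. with `keybase pgp decrypt` after base64-decoding
# the exported value.
pulumi.export("encryptedSecret", pgp_access_key.encrypted_secret)
```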
72404a36c252320dcf33da2e2a4d9a14abaaf77b3c79ed7adeb92bda04099d86 | @property
@pulumi.getter
def secret(self) -> pulumi.Output[str]:
'\n Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.\n '
return pulumi.get(self, 'secret') | Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation. | sdk/python/pulumi_aws/iam/access_key.py | secret | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def secret(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'secret') | @property
@pulumi.getter
def secret(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'secret')<|docstring|>Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.<|endoftext|> |
0ef88a09edb9afad919004a75df3ca42d55f3ec53ed66572acb6db30128e5444 | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> pulumi.Output[str]:
"\n Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).\n "
return pulumi.get(self, 'ses_smtp_password_v4') | Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region). | sdk/python/pulumi_aws/iam/access_key.py | ses_smtp_password_v4 | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> pulumi.Output[str]:
"\n \n "
return pulumi.get(self, 'ses_smtp_password_v4') | @property
@pulumi.getter(name='sesSmtpPasswordV4')
def ses_smtp_password_v4(self) -> pulumi.Output[str]:
"\n \n "
return pulumi.get(self, 'ses_smtp_password_v4')<|docstring|>Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).<|endoftext|> |
9e7595233a0743b855e3a7edddd0c11d99f50fd8ca65ac17dd988822389f8c5a | @property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
'\n Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.\n '
return pulumi.get(self, 'status') | Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`. | sdk/python/pulumi_aws/iam/access_key.py | status | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'status') | @property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'status')<|docstring|>Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.<|endoftext|> |
62d5beed3a144a03de669c2f1a0d22b06a8247b8bc506513a9b0ba5eb4945b7e | @property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
'\n IAM user to associate with this access key.\n '
return pulumi.get(self, 'user') | IAM user to associate with this access key. | sdk/python/pulumi_aws/iam/access_key.py | user | rapzo/pulumi-aws | 260 | python | @property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'user') | @property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'user')<|docstring|>IAM user to associate with this access key.<|endoftext|> |
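Taken together, these getters surface the outputs of an `iam.AccessKey` resource. A minimal usage sketch follows; the resource names are illustrative placeholders, and the keybase handle is the one quoted in the `pgp_key` docstring above:

import pulumi
import pulumi_aws as aws

user = aws.iam.User('example-user')  # hypothetical IAM user
# Supplying pgp_key keeps the plaintext secret out of the state file;
# encrypted_secret and key_fingerprint are populated instead.
key = aws.iam.AccessKey('example-key', user=user.name, pgp_key='keybase:some_person_that_exists')
pulumi.export('access_key_id', key.id)
pulumi.export('ses_smtp_password_v4', key.ses_smtp_password_v4)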
cd8f6c3f56797d4a2db4af70226b6430ad4bf408ba79ad47d062d0943520b688 | def generate_random_string(chars=ALPHANUMERIC_CHARS, length=STRING_LENGTH):
'\n Generate a random string starting from a set of chars and of fixed length.\n '
return ''.join((random.choice(chars) for _ in range(length))) | Generate a random string starting from a set of chars and of fixed length. | QuestionTime/core/utils.py | generate_random_string | pogginicolo98/QuestionTime | 0 | python | def generate_random_string(chars=ALPHANUMERIC_CHARS, length=STRING_LENGTH):
'\n \n '
return ''.join((random.choice(chars) for _ in range(length))) | def generate_random_string(chars=ALPHANUMERIC_CHARS, length=STRING_LENGTH):
'\n \n '
return ''.join((random.choice(chars) for _ in range(length)))<|docstring|>Generate a random string starting from a set of chars and of fixed length.<|endoftext|>
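A short usage sketch; ALPHANUMERIC_CHARS and STRING_LENGTH are module-level constants whose exact definitions are assumed here:

import random
import string

ALPHANUMERIC_CHARS = string.ascii_letters + string.digits  # assumed definition
STRING_LENGTH = 6  # assumed definition

token = generate_random_string()  # e.g. 'a3Bk9Z'
pin = generate_random_string(string.digits, 4)  # digits only, length 4
# For security-sensitive tokens, secrets.choice is the safer drop-in for random.choice.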
07501808af04b6144bd1053b6cad72592bfb2581c5be56418125c48f8700c731 | def __init__(self, context, *args, **kwargs):
'Initialize the browser.'
self.browser = context.browser | Initialize the browser. | tests/pages/base.py | __init__ | 20tab/django-bdd-toolkit | 9 | python | def __init__(self, context, *args, **kwargs):
self.browser = context.browser | def __init__(self, context, *args, **kwargs):
self.browser = context.browser<|docstring|>Initialize the browser.<|endoftext|> |
f7bbc19f7a9f39a68048f9ea2810e5e1d368a6e3067b6bc6e15dccb937bd1fc7 | def fit(self, X, y, check_input=True):
'Build a decision tree based on samples X and\n corresponding classifications y.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The training input samples.\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n The target values (class labels in classification, real numbers in\n regression).\n check_input : bool (default=True)\n whether to check the input for numerical features\n\n Attributes\n ----------\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n X_encoders_ : list\n List of LabelEncoders that transform input from labels to binary\n encodings and vice versa.\n\n y_encoder_ : LabelEncoder\n LabelEncoder that transforms output from labels to binary\n encodings and vice versa.\n\n is_numerical_ : bool array of size [n_features]\n Array flagging which features are assumed to be numerical\n\n builder_ : TreeBuilder\n Instance of the tree builder\n\n tree_ : Tree\n Instance of the built tree\n\n Returns\n -------\n self : object\n Returns self.\n '
(X_, y_) = check_X_y(X, y)
self.y_encoder_ = ExtendedLabelEncoder()
y_ = self.y_encoder_.fit_transform(y_)
max_np_int = np.iinfo(np.int32).max
if (not isinstance(self.max_depth, (numbers.Integral, np.integer))):
max_depth = max_np_int
else:
max_depth = self.max_depth
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = (1 if (self.min_samples_split < 1) else self.min_samples_split)
else:
min_samples_split = 1
if isinstance(self.min_entropy_decrease, (np.float, np.integer)):
min_entropy_decrease = (0 if (self.min_entropy_decrease < 0) else self.min_entropy_decrease)
else:
min_entropy_decrease = 0
(_, self.n_features_) = X_.shape
self.is_numerical_ = ([False] * self.n_features_)
X_tmp = np.zeros(X_.shape, dtype=np.float32)
self.X_encoders_ = [ExtendedLabelEncoder() for _ in range(self.n_features_)]
for i in range(self.n_features_):
if (check_input and check_numerical_array(X_[:, i])):
self.is_numerical_[i] = True
X_tmp[:, i] = X_[:, i]
else:
X_tmp[:, i] = self.X_encoders_[i].fit_transform(X_[:, i])
X_ = X_tmp
if self.prune:
(X_, X_test, y_, y_test) = train_test_split(X_, y_, test_size=0.3)
splitter = Splitter(X_, y_, self.is_numerical_, self.X_encoders_, self.gain_ratio)
self.builder_ = TreeBuilder(splitter, self.y_encoder_, X_.shape[0], self.n_features_, self.is_numerical_, max_depth=max_depth, min_samples_split=min_samples_split, min_entropy_decrease=min_entropy_decrease, prune=self.prune, is_repeating=self.is_repeating)
self.tree_ = Tree(X_encoders=self.X_encoders_, y_encoder=self.y_encoder_)
if self.prune:
self.builder_.build(self.tree_, X_, y_, X_test, y_test)
else:
self.builder_.build(self.tree_, X_, y_)
return self | Build a decision tree based on samples X and
corresponding classifications y.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
check_input : bool (default=True)
whether to check the input for numerical features
Attributes
----------
n_features_ : int
The number of features when ``fit`` is performed.
X_encoders_ : list
List of LabelEncoders that transform input from labels to binary
encodings and vice versa.
y_encoder_ : LabelEncoder
LabelEncoder that transforms output from labels to binary
encodings and vice versa.
is_numerical_ : bool array of size [n_features]
Array flagging which features are assumed to be numerical
builder_ : TreeBuilder
Instance of the tree builder
tree_ : Tree
Instance of the built tree
Returns
-------
self : object
Returns self. | id3/id3.py | fit | salilmishra23/decision-tree-id3 | 31 | python | def fit(self, X, y, check_input=True):
'Build a decision tree based on samples X and\n corresponding classifications y.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The training input samples.\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n The target values (class labels in classification, real numbers in\n regression).\n check_input : bool (default=True)\n whether to check the input for numerical features\n\n Attributes\n ----------\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n X_encoders_ : list\n List of LabelEncoders that transform input from labels to binary\n encodings and vice versa.\n\n y_encoder_ : LabelEncoder\n LabelEncoder that transforms output from labels to binary\n encodings and vice versa.\n\n is_numerical_ : bool array of size [n_features]\n Array flagging which features are assumed to be numerical\n\n builder_ : TreeBuilder\n Instance of the tree builder\n\n tree_ : Tree\n Instance of the built tree\n\n Returns\n -------\n self : object\n Returns self.\n '
(X_, y_) = check_X_y(X, y)
self.y_encoder_ = ExtendedLabelEncoder()
y_ = self.y_encoder_.fit_transform(y_)
max_np_int = np.iinfo(np.int32).max
if (not isinstance(self.max_depth, (numbers.Integral, np.integer))):
max_depth = max_np_int
else:
max_depth = self.max_depth
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = (1 if (self.min_samples_split < 1) else self.min_samples_split)
else:
min_samples_split = 1
if isinstance(self.min_entropy_decrease, (np.float, np.integer)):
min_entropy_decrease = (0 if (self.min_entropy_decrease < 0) else self.min_entropy_decrease)
else:
min_entropy_decrease = 0
(_, self.n_features_) = X_.shape
self.is_numerical_ = ([False] * self.n_features_)
X_tmp = np.zeros(X_.shape, dtype=np.float32)
self.X_encoders_ = [ExtendedLabelEncoder() for _ in range(self.n_features_)]
for i in range(self.n_features_):
if (check_input and check_numerical_array(X_[:, i])):
self.is_numerical_[i] = True
X_tmp[:, i] = X_[:, i]
else:
X_tmp[:, i] = self.X_encoders_[i].fit_transform(X_[:, i])
X_ = X_tmp
if self.prune:
(X_, X_test, y_, y_test) = train_test_split(X_, y_, test_size=0.3)
splitter = Splitter(X_, y_, self.is_numerical_, self.X_encoders_, self.gain_ratio)
self.builder_ = TreeBuilder(splitter, self.y_encoder_, X_.shape[0], self.n_features_, self.is_numerical_, max_depth=max_depth, min_samples_split=min_samples_split, min_entropy_decrease=min_entropy_decrease, prune=self.prune, is_repeating=self.is_repeating)
self.tree_ = Tree(X_encoders=self.X_encoders_, y_encoder=self.y_encoder_)
if self.prune:
self.builder_.build(self.tree_, X_, y_, X_test, y_test)
else:
self.builder_.build(self.tree_, X_, y_)
return self | def fit(self, X, y, check_input=True):
'Build a decision tree based on samples X and\n corresponding classifications y.\n\n Parameters\n ----------\n X : array-like or sparse matrix of shape = [n_samples, n_features]\n The training input samples.\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n The target values (class labels in classification, real numbers in\n regression).\n check_input : bool (default=True)\n whether to check the input for numerical features\n\n Attributes\n ----------\n n_features_ : int\n The number of features when ``fit`` is performed.\n\n X_encoders_ : list\n List of LabelEncoders that transform input from labels to binary\n encodings and vice versa.\n\n y_encoder_ : LabelEncoder\n LabelEncoder that transforms output from labels to binary\n encodings and vice versa.\n\n is_numerical_ : bool array of size [n_features]\n Array flagging which features are assumed to be numerical\n\n builder_ : TreeBuilder\n Instance of the tree builder\n\n tree_ : Tree\n Instance of the built tree\n\n Returns\n -------\n self : object\n Returns self.\n '
(X_, y_) = check_X_y(X, y)
self.y_encoder_ = ExtendedLabelEncoder()
y_ = self.y_encoder_.fit_transform(y_)
max_np_int = np.iinfo(np.int32).max
if (not isinstance(self.max_depth, (numbers.Integral, np.integer))):
max_depth = max_np_int
else:
max_depth = self.max_depth
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = (1 if (self.min_samples_split < 1) else self.min_samples_split)
else:
min_samples_split = 1
if isinstance(self.min_entropy_decrease, (np.float, np.integer)):
min_entropy_decrease = (0 if (self.min_entropy_decrease < 0) else self.min_entropy_decrease)
else:
min_entropy_decrease = 0
(_, self.n_features_) = X_.shape
self.is_numerical_ = ([False] * self.n_features_)
X_tmp = np.zeros(X_.shape, dtype=np.float32)
self.X_encoders_ = [ExtendedLabelEncoder() for _ in range(self.n_features_)]
for i in range(self.n_features_):
if (check_input and check_numerical_array(X_[:, i])):
self.is_numerical_[i] = True
X_tmp[:, i] = X_[:, i]
else:
X_tmp[:, i] = self.X_encoders_[i].fit_transform(X_[:, i])
X_ = X_tmp
if self.prune:
(X_, X_test, y_, y_test) = train_test_split(X_, y_, test_size=0.3)
splitter = Splitter(X_, y_, self.is_numerical_, self.X_encoders_, self.gain_ratio)
self.builder_ = TreeBuilder(splitter, self.y_encoder_, X_.shape[0], self.n_features_, self.is_numerical_, max_depth=max_depth, min_samples_split=min_samples_split, min_entropy_decrease=min_entropy_decrease, prune=self.prune, is_repeating=self.is_repeating)
self.tree_ = Tree(X_encoders=self.X_encoders_, y_encoder=self.y_encoder_)
if self.prune:
self.builder_.build(self.tree_, X_, y_, X_test, y_test)
else:
self.builder_.build(self.tree_, X_, y_)
return self<|docstring|>Build a decision tree based on samples X and
corresponding classifications y.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
check_input : bool (default=True)
whether to check the input for numerical features
Attributes
----------
n_features_ : int
The number of features when ``fit`` is performed.
X_encoders_ : list
List of LabelEncoders that transform input from labels to binary
encodings and vice versa.
y_encoder_ : LabelEncoder
LabelEncoder that transforms output from labels to binary
encodings and vice versa.
is_numerical_ : bool array of size [n_features]
Array flagging which features are assumed to be numerical
builder_ : TreeBuilder
Instance of the tree builder
tree_ : Tree
Instance of the built tree
Returns
-------
self : object
Returns self.<|endoftext|> |
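For reference, the criterion behind `min_entropy_decrease` and `gain_ratio` is the usual ID3 information gain; in standard notation:

H(S) = -\sum_{c} p_c \log_2 p_c
\mathrm{Gain}(S, A) = H(S) - \sum_{v \in \mathrm{values}(A)} \frac{|S_v|}{|S|} H(S_v)

Splitting stops once the best attainable gain falls below `min_entropy_decrease`, or when the depth and sample limits above are reached.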
5318198842e4a4d3b400cfc49d2ca64c0e65781b27a437dec9e8717b0471352b | def predict(self, X):
'Predict class for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict(self.tree_, X_)
return self.y_encoder_.inverse_transform(y) | Predict class for every sample in X.
Parameters
----------
X : array-like of shape = [n_samples, n_features_idx]
The input samples.
Returns
-------
y : array of shape = [n_samples] | id3/id3.py | predict | salilmishra23/decision-tree-id3 | 31 | python | def predict(self, X):
'Predict class for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict(self.tree_, X_)
return self.y_encoder_.inverse_transform(y) | def predict(self, X):
'Predict class for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict(self.tree_, X_)
return self.y_encoder_.inverse_transform(y)<|docstring|>Predict class for every sample in X.
Parameters
----------
X : array-like of shape = [n_samples, n_features_idx]
The input samples.
Returns
-------
y : array of shape = [n_samples]<|endoftext|> |
4465afbb08471e24e734ec7f4bfc15c2c2b9f95831264781dcc2f723a5a02480 | def predict_proba(self, X):
'Predict class probabilities for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples, n_classes]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict_proba(self.tree_, X_)
return y | Predict class probabilities for every sample in X.
Parameters
----------
X : array-like of shape = [n_samples, n_features_idx]
The input samples.
Returns
-------
y : array of shape = [n_samples, n_classes] | id3/id3.py | predict_proba | salilmishra23/decision-tree-id3 | 31 | python | def predict_proba(self, X):
'Predict class probabilities for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples, n_classes]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict_proba(self.tree_, X_)
return y | def predict_proba(self, X):
'Predict class probabilities for every sample in X.\n\n Parameters\n ----------\n X : array-like of shape = [n_samples, n_features_idx]\n The input samples.\n\n Returns\n -------\n y : array of shape = [n_samples, n_classes]\n '
check_is_fitted(self, 'tree_')
X = check_array(X)
n_features = X.shape[1]
if (n_features != self.n_features_):
raise ValueError('Number of features of the model must match the input. Model n_features is {} and input n_features is {}.'.format(self.n_features_, n_features))
X_ = np.empty(X.shape)
for i in range(self.n_features_):
if self.is_numerical_[i]:
X_[:, i] = X[:, i]
else:
try:
X_[:, i] = self.X_encoders_[i].transform(X[:, i])
except ValueError as e:
raise ValueError('New attribute value not found in train data.')
y = self.builder_._predict_proba(self.tree_, X_)
return y<|docstring|>Predict class probabilities for every sample in X.
Parameters
----------
X : array-like of shape = [n_samples, n_features_idx]
The input samples.
Returns
-------
y : array of shape = [n_samples, n_classes]<|endoftext|> |
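An end-to-end sketch of the three methods above. The class name `Id3Estimator` is an assumption about how this fit/predict/predict_proba trio is packaged, and the toy data is illustrative:

import numpy as np
# from id3 import Id3Estimator  # assumed import path

X = np.array([[0, 85], [1, 70], [2, 64], [0, 72]])  # both columns pass the numerical check
y = np.array(['no', 'yes', 'yes', 'no'])

clf = Id3Estimator()  # hypothetical constructor exposing the documented parameters
clf.fit(X, y)  # builds tree_, X_encoders_ and y_encoder_
print(clf.predict(X))  # labels decoded back through y_encoder_
print(clf.predict_proba(X))  # [n_samples, n_classes] probabilities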
673e13bf086e8c15d101f0ea57aeef9a119134970190bbdb6db6d34d112d1b56 | @classmethod
def of(cls, value):
'Given either a dictionary or a `ConfigSource` object, return\n a `ConfigSource` object. This lets a function accept either type\n of object as an argument.\n '
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict') | Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument. | confuse/sources.py | of | ntqr/confuse | 0 | python | @classmethod
def of(cls, value):
'Given either a dictionary or a `ConfigSource` object, return\n a `ConfigSource` object. This lets a function accept either type\n of object as an argument.\n '
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict') | @classmethod
def of(cls, value):
'Given either a dictionary or a `ConfigSource` object, return\n a `ConfigSource` object. This lets a function accept either type\n of object as an argument.\n '
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError(u'source value must be a dict')<|docstring|>Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument.<|endoftext|> |
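A quick sketch of the classmethod's contract (the import path is assumed):

from confuse import ConfigSource

src = ConfigSource.of({'verbose': True})  # a plain dict gets wrapped
assert ConfigSource.of(src) is src  # an existing ConfigSource passes through untouched
ConfigSource.of(['not', 'a', 'dict'])  # raises TypeError: source value must be a dict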
34753c81f5f36d3f45d0ad616352bdbef3c1d1e5461357175a164280206d855a | def get_sources(country, category):
'\n Function that gets the JSON response to our URL request\n '
get_news_url = base_url.format(country, category, api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
source_results = None
if get_news_response['sources']:
source_results_list = get_news_response['sources']
source_results = process_sources(source_results_list)
return source_results | Function that gets the JSON response to our URL request | app/request.py | get_sources | JaredAhaza/News-highlights | 0 | python | def get_sources(country, category):
'\n \n '
get_news_url = base_url.format(country, category, api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
source_results = None
if get_news_response['sources']:
source_results_list = get_news_response['sources']
source_results = process_sources(source_results_list)
return source_results | def get_sources(country, category):
'\n \n '
get_news_url = base_url.format(country, category, api_key)
with urllib.request.urlopen(get_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
source_results = None
if get_news_response['sources']:
source_results_list = get_news_response['sources']
source_results = process_sources(source_results_list)
return source_results<|docstring|>Function that gets the JSON response to our URL request<|endoftext|>
f06150b6630e95e7089b6f3bfacd4d86ff00225bec84ca624429649872997a3b | def process_sources(source_list):
'\n Function that processes the dictionary and outputs a list of objects\n '
news_results = []
for source in source_list:
id = source.get('id')
name = source.get('name')
description = source.get('description')
url = source.get('url')
category = source.get('category')
country = source.get('country')
if url:
source_object = Source(id, name, description, url, category, country)
news_results.append(source_object)
return news_results | Function that processes the dictionary and outputs a list of objects | app/request.py | process_sources | JaredAhaza/News-highlights | 0 | python | def process_sources(source_list):
'\n \n '
news_results = []
for source in source_list:
id = source.get('id')
name = source.get('name')
description = source.get('description')
url = source.get('url')
category = source.get('category')
country = source.get('country')
if url:
source_object = Source(id, name, description, url, category, country)
news_results.append(source_object)
return news_results | def process_sources(source_list):
'\n \n '
news_results = []
for source in source_list:
id = source.get('id')
name = source.get('name')
description = source.get('description')
url = source.get('url')
category = source.get('category')
country = source.get('country')
if url:
source_object = Source(id, name, description, url, category, country)
news_results.append(source_object)
return news_results<|docstring|>Function that processes the dictionary and outputs a list of objects<|endoftext|>
b876e36637dc0d87bf08cd2fcbaf52a63db48b4ea15d8fe81ed4db51f3140539 | def get_articles(id):
'\n Function that gets the JSON response to our URL request\n '
get_source_news_url = source_url.format(id, api_key)
with urllib.request.urlopen(get_source_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_articles(news_results_list)
return news_results | Function that gets the JSON response to our URL request | app/request.py | get_articles | JaredAhaza/News-highlights | 0 | python | def get_articles(id):
'\n \n '
get_source_news_url = source_url.format(id, api_key)
with urllib.request.urlopen(get_source_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_articles(news_results_list)
return news_results | def get_articles(id):
'\n \n '
get_source_news_url = source_url.format(id, api_key)
with urllib.request.urlopen(get_source_news_url) as url:
get_news_data = url.read()
get_news_response = json.loads(get_news_data.decode('utf'))
news_results = None
if get_news_response['articles']:
news_results_list = get_news_response['articles']
news_results = process_articles(news_results_list)
return news_results<|docstring|>Function that gets the JSON response to our URL request<|endoftext|>
ecd74b7f46aa9ab020e5a400ec5e9299c9734b98b6f008e15794bd75e84c4529 | def process_articles(articles_list):
'\n Function that processes the dictionary and outputs a list of objects\n '
news_results = []
source_dictionary = {}
for result in articles_list:
source_id = result['source']
source_dictionary['id'] = source_id['id']
source_dictionary['name'] = source_id['name']
id = source_dictionary['id']
name = source_dictionary['name']
author = result.get('author')
title = result.get('title')
description = result.get('description')
url = result.get('url')
urlToImage = result.get('urlToImage')
publishedAt = result.get('publishedAt')
if url:
source_object = Articles(id, name, author, title, description, url, urlToImage, publishedAt)
news_results.append(source_object)
return news_results | Function that processes the dictionary and outputs a list of objects | app/request.py | process_articles | JaredAhaza/News-highlights | 0 | python | def process_articles(articles_list):
'\n \n '
news_results = []
source_dictionary = {}
for result in articles_list:
source_id = result['source']
source_dictionary['id'] = source_id['id']
source_dictionary['name'] = source_id['name']
id = source_dictionary['id']
name = source_dictionary['name']
author = result.get('author')
title = result.get('title')
description = result.get('description')
url = result.get('url')
urlToImage = result.get('urlToImage')
publishedAt = result.get('publishedAt')
if url:
source_object = Articles(id, name, author, title, description, url, urlToImage, publishedAt)
news_results.append(source_object)
return news_results | def process_articles(articles_list):
'\n \n '
news_results = []
source_dictionary = {}
for result in articles_list:
source_id = result['source']
source_dictionary['id'] = source_id['id']
source_dictionary['name'] = source_id['name']
id = source_dictionary['id']
name = source_dictionary['name']
author = result.get('author')
title = result.get('title')
description = result.get('description')
url = result.get('url')
urlToImage = result.get('urlToImage')
publishedAt = result.get('publishedAt')
if url:
source_object = Articles(id, name, author, title, description, url, urlToImage, publishedAt)
news_results.append(source_object)
return news_results<|docstring|>Function that processes the dictionary and outputs a list of objects<|endoftext|>
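For reference, a sketch of one article dictionary these helpers index into; the field names are exactly the ones read above, the values are placeholders:

sample_article = {
    'source': {'id': 'bbc-news', 'name': 'BBC News'},
    'author': 'Jane Doe',
    'title': 'Example headline',
    'description': 'Example summary',
    'url': 'https://example.com/story',
    'urlToImage': 'https://example.com/story.jpg',
    'publishedAt': '2021-01-01T00:00:00Z',
}
articles = process_articles([sample_article])  # -> list with one Articles object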
db2fe6ed6d962b1775d878cefab3f0bc449e360ae7b1d93f0ef8d104a2ac03f2 | def check_action_command(ctx, action_command):
'Verifies the action command is valid'
if (action_command not in ['deploy_site', 'update_site', 'redeploy_server']):
ctx.fail('Invalid action command. The action commands available are deploy_site, update_site, and redeploy_server.') | Verifies the action command is valid | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_action_command | att-comdev/shipyard | 14 | python | def check_action_command(ctx, action_command):
if (action_command not in ['deploy_site', 'update_site', 'redeploy_server']):
ctx.fail('Invalid action command. The action commands available are deploy_site, update_site, and redeploy_server.') | def check_action_command(ctx, action_command):
if (action_command not in ['deploy_site', 'update_site', 'redeploy_server']):
ctx.fail('Invalid action command. The action commands available are deploy_site, update_site, and redeploy_server.')<|docstring|>Verifies the action command is valid<|endoftext|> |
de58eec3c70f334fb27d8730bf8b0fabc70aa1153ea4d64d60c35ee9e9861996 | def check_control_action(ctx, action):
'Verifies the control action is valid'
if (action not in ['pause', 'unpause', 'stop']):
ctx.fail('Invalid action. Please enter pause, unpause, or stop.') | Verifies the control action is valid | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_control_action | att-comdev/shipyard | 14 | python | def check_control_action(ctx, action):
if (action not in ['pause', 'unpause', 'stop']):
ctx.fail('Invalid action. Please enter pause, unpause, or stop.') | def check_control_action(ctx, action):
if (action not in ['pause', 'unpause', 'stop']):
ctx.fail('Invalid action. Please enter pause, unpause, or stop.')<|docstring|>Verifies the control action is valid<|endoftext|> |
293dfc00b336afd54b6a8c476bfd249fc31e37f8bb3af4cca8755fd22ac2a161 | def check_id(ctx, action_id):
'Verifies a ULID id is in a valid format'
if (action_id is None):
ctx.fail('Invalid ID. None is not a valid action ID.')
if (len(action_id) != 26):
ctx.fail('Invalid ID. ID can only be 26 characters.')
if (not action_id.isalnum()):
ctx.fail('Invalid ID. ID can only contain letters and numbers.') | Verifies a ULID id is in a valid format | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_id | att-comdev/shipyard | 14 | python | def check_id(ctx, action_id):
if (action_id is None):
ctx.fail('Invalid ID. None is not a valid action ID.')
if (len(action_id) != 26):
ctx.fail('Invalid ID. ID can only be 26 characters.')
if (not action_id.isalnum()):
ctx.fail('Invalid ID. ID can only contain letters and numbers.') | def check_id(ctx, action_id):
if (action_id is None):
ctx.fail('Invalid ID. None is not a valid action ID.')
if (len(action_id) != 26):
ctx.fail('Invalid ID. ID can only be 26 characters.')
if (not action_id.isalnum()):
ctx.fail('Invalid ID. ID can only contain letters and numbers.')<|docstring|>Verifies a ULID id is in a valid format<|endoftext|> |
6b69a5065c7e3650bcd3499a70510a7bf620ea03fc963c2476531ca4b9293dc3 | def check_workflow_id(ctx, workflow_id):
'Verifies that a workflow id matches the desired format'
if (workflow_id is None):
ctx.fail('Invalid ID. None is not a valid workflow ID.')
if ('__' not in workflow_id):
ctx.fail('Invalid ID. The ID must contain a double underscore separating the workflow name from the execution date')
input_date_string = workflow_id.split('__')[1]
date_format_ok = True
try:
parsed_dt = arrow.get(input_date_string)
if (input_date_string != parsed_dt.format('YYYY-MM-DDTHH:mm:ss.SSSSSS')):
date_format_ok = False
except ParserError:
date_format_ok = False
if (not date_format_ok):
ctx.fail('Invalid ID. The date portion of the ID must conform to YYYY-MM-DDTHH:mm:ss.SSSSSS') | Verifies that a workflow id matches the desired format | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_workflow_id | att-comdev/shipyard | 14 | python | def check_workflow_id(ctx, workflow_id):
if (workflow_id is None):
ctx.fail('Invalid ID. None is not a valid workflow ID.')
if ('__' not in workflow_id):
ctx.fail('Invalid ID. The ID must contain a double underscore separating the workflow name from the execution date')
input_date_string = workflow_id.split('__')[1]
date_format_ok = True
try:
parsed_dt = arrow.get(input_date_string)
if (input_date_string != parsed_dt.format('YYYY-MM-DDTHH:mm:ss.SSSSSS')):
date_format_ok = False
except ParserError:
date_format_ok = False
if (not date_format_ok):
ctx.fail('Invalid ID. The date portion of the ID must conform to YYYY-MM-DDTHH:mm:ss.SSSSSS') | def check_workflow_id(ctx, workflow_id):
if (workflow_id is None):
ctx.fail('Invalid ID. None is not a valid workflow ID.')
if ('__' not in workflow_id):
ctx.fail('Invalid ID. The ID must contain a double underscore separating the workflow name from the execution date')
input_date_string = workflow_id.split('__')[1]
date_format_ok = True
try:
parsed_dt = arrow.get(input_date_string)
if (input_date_string != parsed_dt.format('YYYY-MM-DDTHH:mm:ss.SSSSSS')):
date_format_ok = False
except ParserError:
date_format_ok = False
if (not date_format_ok):
ctx.fail('Invalid ID. The date portion of the ID must conform to YYYY-MM-DDTHH:mm:ss.SSSSSS')<|docstring|>Verifies that a workflow id matches the desired format<|endoftext|> |
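Concretely, an ID passes only when the part after the double underscore round-trips through arrow's YYYY-MM-DDTHH:mm:ss.SSSSSS format. Assuming ctx is a click context whose fail() aborts:

check_workflow_id(ctx, 'deploy_site__2018-04-20T06:47:35.123456')  # passes
check_workflow_id(ctx, 'deploy_site__2018-04-20')  # fails: date format too coarse
check_workflow_id(ctx, 'deploy_site')  # fails: no double underscore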
1709f741dba403816e30fc39eca36244a1c2ffade2d1c886d87635601cd57c6a | def check_reformat_parameter(ctx, param):
'Checks for <name>=<value> format'
param_dictionary = {}
try:
for p in param:
values = p.split('=')
param_dictionary[values[0]] = values[1]
except Exception:
ctx.fail((('Invalid parameter or parameter format for ' + p) + '. Please utilize the format: <parameter name>=<parameter value>'))
return param_dictionary | Checks for <name>=<value> format | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_reformat_parameter | att-comdev/shipyard | 14 | python | def check_reformat_parameter(ctx, param):
param_dictionary = {}
try:
for p in param:
values = p.split('=')
param_dictionary[values[0]] = values[1]
except Exception:
ctx.fail((('Invalid parameter or parameter format for ' + p) + '. Please utilize the format: <parameter name>=<parameter value>'))
return param_dictionary | def check_reformat_parameter(ctx, param):
param_dictionary = {}
try:
for p in param:
values = p.split('=')
param_dictionary[values[0]] = values[1]
except Exception:
ctx.fail((('Invalid parameter or parameter format for ' + p) + '. Please utilize the format: <parameter name>=<parameter value>'))
return param_dictionary<|docstring|>Checks for <name>=<value> format<|endoftext|> |
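Given the split-on-'=' logic above, the behaviour looks like this:

check_reformat_parameter(ctx, ['servername=mcp', 'timeout=30'])
# -> {'servername': 'mcp', 'timeout': '30'}
check_reformat_parameter(ctx, ['noequalsign'])  # IndexError is caught -> ctx.fail(...)

Note that anything past a second '=' is silently dropped: 'a=b=c' yields {'a': 'b'}.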
866573b41305abcc5dd2fa3c590415ec20fc3b1765248879e4e217b6691dd08b | def check_reformat_versions(ctx, buffer, committed, last_site_action, successful_site_action):
'Checks and reformats versions'
versions = []
if buffer:
versions.append('buffer')
if committed:
versions.append('committed')
if last_site_action:
versions.append('last_site_action')
if successful_site_action:
versions.append('successful_site_action')
if (len(versions) == 0):
return ['committed', 'buffer']
elif (len(versions) == 2):
return versions
else:
ctx.fail("Invalid input. User must either\n1. Pass in 0 versions, in which case --buffer and --committed versions are assumed\n2. Pass in 2 valid versions for comparison\n\nValid versions are '--buffer', '--committed', '--last-site-action' and '--successful-site-action'") | Checks and reformat version | src/bin/shipyard_client/shipyard_client/cli/input_checks.py | check_reformat_versions | att-comdev/shipyard | 14 | python | def check_reformat_versions(ctx, buffer, committed, last_site_action, successful_site_action):
versions = []
if buffer:
versions.append('buffer')
if committed:
versions.append('committed')
if last_site_action:
versions.append('last_site_action')
if successful_site_action:
versions.append('successful_site_action')
if (len(versions) == 0):
return ['committed', 'buffer']
elif (len(versions) == 2):
return versions
else:
ctx.fail("Invalid input. User must either\n1. Pass in 0 versions, in which case --buffer and --committed versions are assumed\n2. Pass in 2 valid versions for comparison\n\nValid versions are '--buffer', '--committed', '--last-site-action' and '--successful-site-action'") | def check_reformat_versions(ctx, buffer, committed, last_site_action, successful_site_action):
versions = []
if buffer:
versions.append('buffer')
if committed:
versions.append('committed')
if last_site_action:
versions.append('last_site_action')
if successful_site_action:
versions.append('successful_site_action')
if (len(versions) == 0):
return ['committed', 'buffer']
elif (len(versions) == 2):
return versions
else:
ctx.fail("Invalid input. User must either\n1. Pass in 0 versions, in which case --buffer and --committed versions are assumed\n2. Pass in 2 valid versions for comparison\n\nValid versions are '--buffer', '--committed', '--last-site-action' and '--successful-site-action'")<|docstring|>Checks and reformat version<|endoftext|> |
c93526d4b891629de98aae9158a4d0ef39c90586ca400daf84d9b9eff12ccdf6 | def test_do_not_lock_resources_when_not_ready(self):
" Test to make sure that resources won't go unused waiting on workers "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id']) | Test to make sure that resources won't go unused waiting on workers | test/central_planner_test.py | test_do_not_lock_resources_when_not_ready | GlobalFishingWatch/luigi | 2 | python | def test_do_not_lock_resources_when_not_ready(self):
" "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id']) | def test_do_not_lock_resources_when_not_ready(self):
" "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])<|docstring|>Test to make sure that resources won't go unused waiting on workers<|endoftext|> |
db0d4371f1e4c67752fa0f42dfccfd21eeaad3537c9621be14256befe7f2d4cf | def test_do_not_lock_resources_while_running_higher_priority(self):
" Test to make sure that resources won't go unused waiting on workers "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id']) | Test to make sure that resources won't go unused waiting on workers | test/central_planner_test.py | test_do_not_lock_resources_while_running_higher_priority | GlobalFishingWatch/luigi | 2 | python | def test_do_not_lock_resources_while_running_higher_priority(self):
" "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id']) | def test_do_not_lock_resources_while_running_higher_priority(self):
" "
self.sch.add_task(worker='X', task_id='A', priority=10)
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])<|docstring|>Test to make sure that resources won't go unused waiting on workers<|endoftext|> |
844e2d5889556b575e5f08e9c0d33e46c8416128860f310f078b1cfce85cb7c9 | def test_lock_resources_while_running_lower_priority(self):
' Make sure resources will be made available while working on lower priority tasks '
self.sch.add_task(worker='X', task_id='A', priority=4)
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertFalse(self.sch.get_work(worker='Y')['task_id']) | Make sure resources will be made available while working on lower priority tasks | test/central_planner_test.py | test_lock_resources_while_running_lower_priority | GlobalFishingWatch/luigi | 2 | python | def test_lock_resources_while_running_lower_priority(self):
' '
self.sch.add_task(worker='X', task_id='A', priority=4)
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertFalse(self.sch.get_work(worker='Y')['task_id']) | def test_lock_resources_while_running_lower_priority(self):
' '
self.sch.add_task(worker='X', task_id='A', priority=4)
self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
self.sch.update_resources(R=1)
self.sch.add_worker('X', [('workers', 1)])
self.assertFalse(self.sch.get_work(worker='Y')['task_id'])<|docstring|>Make sure resources will be made available while working on lower priority tasks<|endoftext|> |
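Outside the tests, the same accounting comes from a task's resources dict plus a scheduler-side limit; a minimal sketch mirroring update_resources(R=1) above:

import luigi

# scheduler config, e.g. in luigi.cfg:
# [resources]
# R = 1

class HoldsR(luigi.Task):
    resources = {'R': 1}  # the task occupies one unit of R while running
    priority = 5

    def run(self):
        pass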
cd57054048f346ed7e5520b587cf1035f257174912a191f19515e4588e76f73b | def test_quadratic_behavior(self):
' Test that get_work is not taking a linear amount of time.\n\n This is of course impossible to test, however, making reasonable\n assumptions about hardware. This time should finish in a timely\n manner.\n '
NUM_TASKS = 10000
for i in range(NUM_TASKS):
self.sch.add_task(worker=str(i), task_id=str(i), resources={})
for i in range(NUM_TASKS):
self.assertEqual(self.sch.get_work(worker=str(i))['task_id'], str(i))
self.sch.add_task(worker=str(i), task_id=str(i), status=DONE) | Test that get_work is not taking a linear amount of time.
This is of course impossible to test, however, making reasonable
assumptions about hardware. This time should finish in a timely
manner. | test/central_planner_test.py | test_quadratic_behavior | GlobalFishingWatch/luigi | 2 | python | def test_quadratic_behavior(self):
' Test that get_work is not taking a linear amount of time.\n\n This is of course impossible to test, however, making reasonable\n assumptions about hardware. This time should finish in a timely\n manner.\n '
NUM_TASKS = 10000
for i in range(NUM_TASKS):
self.sch.add_task(worker=str(i), task_id=str(i), resources={})
for i in range(NUM_TASKS):
self.assertEqual(self.sch.get_work(worker=str(i))['task_id'], str(i))
self.sch.add_task(worker=str(i), task_id=str(i), status=DONE) | def test_quadratic_behavior(self):
' Test that get_work is not taking a linear amount of time.\n\n This is of course impossible to test, however, making reasonable\n assumptions about hardware. This time should finish in a timely\n manner.\n '
NUM_TASKS = 10000
for i in range(NUM_TASKS):
self.sch.add_task(worker=str(i), task_id=str(i), resources={})
for i in range(NUM_TASKS):
self.assertEqual(self.sch.get_work(worker=str(i))['task_id'], str(i))
self.sch.add_task(worker=str(i), task_id=str(i), status=DONE)<|docstring|>Test that get_work is not taking a linear amount of time.
This is of course impossible to test, however, making reasonable
assumptions about hardware. This time should finish in a timely
manner.<|endoftext|> |
9da6291fcd7ed2467a6c0384c2469677120d0c95f68e3ab9d285e13810bb7983 | def test_get_work_speed(self):
' Test that get_work is fast for few workers and many DONEs.\n\n In #986, @daveFNbuck reported that he got a slowdown.\n '
NUM_PENDING = 1000
NUM_DONE = 200000
assert (NUM_DONE >= NUM_PENDING)
for i in range(NUM_PENDING):
self.sch.add_task(worker=WORKER, task_id=str(i), resources={})
for i in range(NUM_PENDING, NUM_DONE):
self.sch.add_task(worker=WORKER, task_id=str(i), status=DONE)
for i in range(NUM_PENDING):
res = int(self.sch.get_work(worker=WORKER)['task_id'])
self.assertTrue((0 <= res < NUM_PENDING))
self.sch.add_task(worker=WORKER, task_id=str(res), status=DONE) | Test that get_work is fast for few workers and many DONEs.
In #986, @daveFNbuck reported that he got a slowdown. | test/central_planner_test.py | test_get_work_speed | GlobalFishingWatch/luigi | 2 | python | def test_get_work_speed(self):
' Test that get_work is fast for few workers and many DONEs.\n\n In #986, @daveFNbuck reported that he got a slowdown.\n '
NUM_PENDING = 1000
NUM_DONE = 200000
assert (NUM_DONE >= NUM_PENDING)
for i in range(NUM_PENDING):
self.sch.add_task(worker=WORKER, task_id=str(i), resources={})
for i in range(NUM_PENDING, NUM_DONE):
self.sch.add_task(worker=WORKER, task_id=str(i), status=DONE)
for i in range(NUM_PENDING):
res = int(self.sch.get_work(worker=WORKER)['task_id'])
self.assertTrue((0 <= res < NUM_PENDING))
self.sch.add_task(worker=WORKER, task_id=str(res), status=DONE) | def test_get_work_speed(self):
' Test that get_work is fast for few workers and many DONEs.\n\n In #986, @daveFNbuck reported that he got a slowdown.\n '
NUM_PENDING = 1000
NUM_DONE = 200000
assert (NUM_DONE >= NUM_PENDING)
for i in range(NUM_PENDING):
self.sch.add_task(worker=WORKER, task_id=str(i), resources={})
for i in range(NUM_PENDING, NUM_DONE):
self.sch.add_task(worker=WORKER, task_id=str(i), status=DONE)
for i in range(NUM_PENDING):
res = int(self.sch.get_work(worker=WORKER)['task_id'])
self.assertTrue((0 <= res < NUM_PENDING))
self.sch.add_task(worker=WORKER, task_id=str(res), status=DONE)<|docstring|>Test that get_work is fast for few workers and many DONEs.
In #986, @daveFNbuck reported that he got a slowdown.<|endoftext|> |
f393f52ab0cab68e769d1534bd68dd2d711722cca41e45bd8750a67d80ac2cfb | @pytest.mark.django_db
@patch('polaris.management.commands.check_trustlines.create_stellar_deposit')
@patch('polaris.management.commands.check_trustlines.settings.HORIZON_SERVER', mock_server)
def test_deposit_check_trustlines_success(acc1_usd_deposit_transaction_factory):
'\n Creates a transaction with status `pending_trust` and checks that\n `check_trustlines` changes its status to `completed`. All the necessary\n functionality and conditions are mocked for determinism.\n '
deposit = acc1_usd_deposit_transaction_factory()
deposit.status = Transaction.STATUS.pending_trust
deposit.save()
deposit.refresh_from_db = Mock()
CheckTrustlinesCMD.check_trustlines()
assert deposit.refresh_from_db.was_called | Creates a transaction with status `pending_trust` and checks that
`check_trustlines` changes its status to `completed`. All the necessary
functionality and conditions are mocked for determinism. | polaris/polaris/tests/processes/test_check_trustlines.py | test_deposit_check_trustlines_success | brunopedrazza/django-polaris | 0 | python | @pytest.mark.django_db
@patch('polaris.management.commands.check_trustlines.create_stellar_deposit')
@patch('polaris.management.commands.check_trustlines.settings.HORIZON_SERVER', mock_server)
def test_deposit_check_trustlines_success(acc1_usd_deposit_transaction_factory):
'\n Creates a transaction with status `pending_trust` and checks that\n `check_trustlines` changes its status to `completed`. All the necessary\n functionality and conditions are mocked for determinism.\n '
deposit = acc1_usd_deposit_transaction_factory()
deposit.status = Transaction.STATUS.pending_trust
deposit.save()
deposit.refresh_from_db = Mock()
CheckTrustlinesCMD.check_trustlines()
assert deposit.refresh_from_db.was_called | @pytest.mark.django_db
@patch('polaris.management.commands.check_trustlines.create_stellar_deposit')
@patch('polaris.management.commands.check_trustlines.settings.HORIZON_SERVER', mock_server)
def test_deposit_check_trustlines_success(acc1_usd_deposit_transaction_factory):
'\n Creates a transaction with status `pending_trust` and checks that\n `check_trustlines` changes its status to `completed`. All the necessary\n functionality and conditions are mocked for determinism.\n '
deposit = acc1_usd_deposit_transaction_factory()
deposit.status = Transaction.STATUS.pending_trust
deposit.save()
deposit.refresh_from_db = Mock()
CheckTrustlinesCMD.check_trustlines()
assert deposit.refresh_from_db.was_called<|docstring|>Creates a transaction with status `pending_trust` and checks that
`check_trustlines` changes its status to `completed`. All the necessary
functionality and conditions are mocked for determinism.<|endoftext|> |
64f84d0e54c40a0df54b2f6095efdcafd3b7c46c24196e5d7a43ee49a2965398 | def checkPositive(func):
' Checks that the parameters are positive numbers '
def wrapper(x):
if (x < 0):
raise ValueError('Value must be positive')
return func(x)
return wrapper | Checks that the parameters are positive numbers | src/decorators.py | checkPositive | CaC-Grupo-9/backend | 1 | python | def checkPositive(func):
' '
def wrapper(x):
if (x < 0):
raise ValueError('Value must be positive')
return func(x)
return wrapper | def checkPositive(func):
' '
def wrapper(x):
if (x < 0):
raise ValueError('Value must be positive')
return func(x)
return wrapper<|docstring|>Checks that the parameters are positive numbers<|endoftext|>
f3d111b8a81ef2c29f92bd28313e2832b586d05b8d58512e755e7d95692befca | def myDecorator(func):
' Decorator that prints the name of the function '
def wrapper(*args, **kwargs):
print(func.__name__)
return func(*args, **kwargs)
return wrapper | Decorator that prints the name of the function | src/decorators.py | myDecorator | CaC-Grupo-9/backend | 1 | python | def myDecorator(func):
' '
def wrapper(*args, **kwargs):
print(func.__name__)
return func(*args, **kwargs)
return wrapper | def myDecorator(func):
' '
def wrapper(*args, **kwargs):
print(func.__name__)
return func(*args, **kwargs)
return wrapper<|docstring|>Decorator that prints the name of the function<|endoftext|>
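The two decorators compose; a short sketch, with math.sqrt standing in for any single-argument function:

import math

@myDecorator
@checkPositive
def root(x):
    return math.sqrt(x)

root(9.0)  # prints 'wrapper', returns 3.0
root(-1.0)  # prints 'wrapper', then raises ValueError('Value must be positive')

Because neither decorator applies functools.wraps, the name printed is the inner wrapper's ('wrapper') rather than 'root'.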
678f4a34445a4f794a970a2b45cb584a5f1c504079e3da4171c1ba634d25d4ab | def square_distance(src, dst):
'\n Calculate the squared Euclidean distance between each pair of points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n '
return torch.sum(((src[:, :, None] - dst[:, None]) ** 2), dim=(- 1)) | Calculate the squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M] | models_dev/pct_utils.py | square_distance | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def square_distance(src, dst):
'\n Calculate the squared Euclidean distance between each pair of points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n '
return torch.sum(((src[:, :, None] - dst[:, None]) ** 2), dim=(- 1)) | def square_distance(src, dst):
'\n Calculate the squared Euclidean distance between each pair of points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n '
return torch.sum(((src[:, :, None] - dst[:, None]) ** 2), dim=(- 1))<|docstring|>Calculate the squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zmοΌ
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]<|endoftext|> |
920ac832349211d7cea6b279fa1fc58afadad79eefb6edcb3725dcbdc8ec708e | def index_points_cuda(points, idx):
'\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n '
points = points.transpose(1, 2).contiguous()
new_points = index_points_cuda_transpose(points, idx)
return new_points.transpose(1, 2).contiguous() | Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C] | models_dev/pct_utils.py | index_points_cuda | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def index_points_cuda(points, idx):
'\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n '
points = points.transpose(1, 2).contiguous()
new_points = index_points_cuda_transpose(points, idx)
return new_points.transpose(1, 2).contiguous() | def index_points_cuda(points, idx):
'\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n '
points = points.transpose(1, 2).contiguous()
new_points = index_points_cuda_transpose(points, idx)
return new_points.transpose(1, 2).contiguous()<|docstring|>Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]<|endoftext|> |
f092ce066e048c328c2efdb996a24b62313f475f57da3e3bbb1c1ad742ff677b | def sample_and_group_cuda(npoint, k, xyz, points, cat_xyz_feature=True):
'\n Input:\n npoint:\n k:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, 3, npoint]\n new_points: sampled points data, [B, C+C_xyz, npoint, k]\n grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]\n '
k = min(npoint, k)
knn = KNN(k=k, transpose_mode=True)
(B, N, C_xyz) = xyz.shape
if (npoint < N):
fps_idx = farthest_point_sample_cuda(xyz, npoint)
torch.cuda.empty_cache()
new_xyz = index_points_cuda(xyz, fps_idx)
else:
new_xyz = xyz
torch.cuda.empty_cache()
(_, idx) = knn(xyz.contiguous(), new_xyz)
idx = idx.int()
torch.cuda.empty_cache()
grouped_xyz = grouping_operation_cuda(xyz.transpose(1, 2).contiguous(), idx).permute(0, 2, 3, 1)
torch.cuda.empty_cache()
try:
grouped_xyz_norm = (grouped_xyz - new_xyz.view((- 1), min(npoint, N), 1, C_xyz))
except:
import ipdb
ipdb.set_trace()
grouped_xyz_norm = grouped_xyz_norm.permute(0, 3, 1, 2).contiguous()
torch.cuda.empty_cache()
grouped_points = grouping_operation_cuda(points.contiguous(), idx)
if cat_xyz_feature:
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=1)
else:
new_points = grouped_points
return (new_xyz.transpose(1, 2), grouped_xyz_norm, new_points) | Input:
npoint:
k:
xyz: input points position data, [B, N, 3]
points: input points data, [B, C, N]
Return:
new_xyz: sampled points position data, [B, 3, npoint]
new_points: sampled points data, [B, C+C_xyz, npoint, k]
grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k] | models_dev/pct_utils.py | sample_and_group_cuda | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def sample_and_group_cuda(npoint, k, xyz, points, cat_xyz_feature=True):
'\n Input:\n npoint:\n k:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, 3, npoint]\n new_points: sampled points data, [B, C+C_xyz, npoint, k]\n grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]\n '
k = min(npoint, k)
knn = KNN(k=k, transpose_mode=True)
(B, N, C_xyz) = xyz.shape
if (npoint < N):
fps_idx = farthest_point_sample_cuda(xyz, npoint)
torch.cuda.empty_cache()
new_xyz = index_points_cuda(xyz, fps_idx)
else:
new_xyz = xyz
torch.cuda.empty_cache()
(_, idx) = knn(xyz.contiguous(), new_xyz)
idx = idx.int()
torch.cuda.empty_cache()
grouped_xyz = grouping_operation_cuda(xyz.transpose(1, 2).contiguous(), idx).permute(0, 2, 3, 1)
torch.cuda.empty_cache()
try:
grouped_xyz_norm = (grouped_xyz - new_xyz.view((- 1), min(npoint, N), 1, C_xyz))
except:
import ipdb
ipdb.set_trace()
grouped_xyz_norm = grouped_xyz_norm.permute(0, 3, 1, 2).contiguous()
torch.cuda.empty_cache()
grouped_points = grouping_operation_cuda(points.contiguous(), idx)
if cat_xyz_feature:
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=1)
else:
new_points = grouped_points
return (new_xyz.transpose(1, 2), grouped_xyz_norm, new_points) | def sample_and_group_cuda(npoint, k, xyz, points, cat_xyz_feature=True):
'\n Input:\n npoint:\n k:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, 3, npoint]\n new_points: sampled points data, [B, C+C_xyz, npoint, k]\n grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]\n '
k = min(npoint, k)
knn = KNN(k=k, transpose_mode=True)
(B, N, C_xyz) = xyz.shape
if (npoint < N):
fps_idx = farthest_point_sample_cuda(xyz, npoint)
torch.cuda.empty_cache()
new_xyz = index_points_cuda(xyz, fps_idx)
else:
new_xyz = xyz
torch.cuda.empty_cache()
(_, idx) = knn(xyz.contiguous(), new_xyz)
idx = idx.int()
torch.cuda.empty_cache()
grouped_xyz = grouping_operation_cuda(xyz.transpose(1, 2).contiguous(), idx).permute(0, 2, 3, 1)
torch.cuda.empty_cache()
try:
grouped_xyz_norm = (grouped_xyz - new_xyz.view((- 1), min(npoint, N), 1, C_xyz))
except:
import ipdb
ipdb.set_trace()
grouped_xyz_norm = grouped_xyz_norm.permute(0, 3, 1, 2).contiguous()
torch.cuda.empty_cache()
grouped_points = grouping_operation_cuda(points.contiguous(), idx)
if cat_xyz_feature:
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=1)
else:
new_points = grouped_points
return (new_xyz.transpose(1, 2), grouped_xyz_norm, new_points)<|docstring|>Input:
npoint:
k:
xyz: input points position data, [B, N, 3]
points: input points data, [B, C, N]
Return:
new_xyz: sampled points position data, [B, 3, npoint]
new_points: sampled points data, [B, C+C_xyz, npoint, k]
grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]<|endoftext|> |
d3dcf2d85a59dfc4631d313f54b4c1a277290426b620d566bf16789e0b381dab | def forward(self, xyz, points):
"\n Input:\n xyz: input points position data, [B, 3, N]\n points: input points data, [B, C, N]\n Return:\n gew_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, npoint) = list(xyz.size())
xyz = xyz.permute(0, 2, 1)
FIXED_NUM_POINTS = False
if FIXED_NUM_POINTS:
npoint = self.npoint
else:
ds_ratio = 2
npoint = (npoint // ds_ratio)
(new_xyz, grouped_xyz_norm, new_points) = sample_and_group_cuda(npoint, self.k, xyz, points, cat_xyz_feature=self.cat_xyz_feature)
for (i, conv) in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points_pooled = torch.max(new_points, 3)[0]
return (new_xyz, new_points_pooled) | Input:
xyz: input points position data, [B, 3, N]
points: input points data, [B, C, N]
Return:
gew_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S] | models_dev/pct_utils.py | forward | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def forward(self, xyz, points):
"\n Input:\n xyz: input points position data, [B, 3, N]\n points: input points data, [B, C, N]\n Return:\n gew_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, npoint) = list(xyz.size())
xyz = xyz.permute(0, 2, 1)
FIXED_NUM_POINTS = False
if FIXED_NUM_POINTS:
npoint = self.npoint
else:
ds_ratio = 2
npoint = (npoint // ds_ratio)
(new_xyz, grouped_xyz_norm, new_points) = sample_and_group_cuda(npoint, self.k, xyz, points, cat_xyz_feature=self.cat_xyz_feature)
for (i, conv) in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points_pooled = torch.max(new_points, 3)[0]
return (new_xyz, new_points_pooled) | def forward(self, xyz, points):
"\n Input:\n xyz: input points position data, [B, 3, N]\n points: input points data, [B, C, N]\n Return:\n gew_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, npoint) = list(xyz.size())
xyz = xyz.permute(0, 2, 1)
FIXED_NUM_POINTS = False
if FIXED_NUM_POINTS:
npoint = self.npoint
else:
ds_ratio = 2
npoint = (npoint // ds_ratio)
(new_xyz, grouped_xyz_norm, new_points) = sample_and_group_cuda(npoint, self.k, xyz, points, cat_xyz_feature=self.cat_xyz_feature)
for (i, conv) in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points_pooled = torch.max(new_points, 3)[0]
return (new_xyz, new_points_pooled)<|docstring|>Input:
xyz: input points position data, [B, 3, N]
points: input points data, [B, C, N]
Return:
gew_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]<|endoftext|> |
a2477b5e2efad2a55c0a99fca64bc88924f268521d9997369fc2e2137764488e | def forward(self, xyz_1, xyz_2, points_1, points_2):
"\n Input:\n M < N\n xyz_1: input points position data, [B, 3, M]\n xyz_2: input points position data, [B, 3, N]\n points_1: input points data, [B, C, M]\n points_2: input points data, [B, C, N]\n\n interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance\n\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, M) = list(points_1.size())
(B, output_dim, N) = list(points_2.size())
if self.CONCAT_FEATS:
pass
else:
points_1 = self.linear_1(points_1)
points_2 = self.linear_2(points_2)
dists = square_distance(xyz_2.transpose(1, 2), xyz_1.transpose(1, 2))
(dists, idx) = dists.sort(dim=(- 1))
(dists, idx) = (dists[(:, :, :self.k)], idx[(:, :, :self.k)])
dist_recip = (1.0 / (dists + 0.1))
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = (dist_recip / norm)
interpolated_points = torch.sum((grouping_operation_cuda(points_1, idx.int()) * weight.view(B, 1, N, 3)), dim=(- 1))
if self.CONCAT_FEATS:
new_points = torch.cat([interpolated_points, points_2], dim=1)
new_points = self.projection(new_points)
return (xyz_2, new_points)
return (xyz_2, (interpolated_points + points_2)) | Input:
M < N
xyz_1: input points position data, [B, 3, M]
xyz_2: input points position data, [B, 3, N]
points_1: input points data, [B, C, M]
points_2: input points data, [B, C, N]
interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S] | models_dev/pct_utils.py | forward | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def forward(self, xyz_1, xyz_2, points_1, points_2):
"\n Input:\n M < N\n xyz_1: input points position data, [B, 3, M]\n xyz_2: input points position data, [B, 3, N]\n points_1: input points data, [B, C, M]\n points_2: input points data, [B, C, N]\n\n interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance\n\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, M) = list(points_1.size())
(B, output_dim, N) = list(points_2.size())
if self.CONCAT_FEATS:
pass
else:
points_1 = self.linear_1(points_1)
points_2 = self.linear_2(points_2)
dists = square_distance(xyz_2.transpose(1, 2), xyz_1.transpose(1, 2))
(dists, idx) = dists.sort(dim=(- 1))
(dists, idx) = (dists[(:, :, :self.k)], idx[(:, :, :self.k)])
dist_recip = (1.0 / (dists + 0.1))
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = (dist_recip / norm)
interpolated_points = torch.sum((grouping_operation_cuda(points_1, idx.int()) * weight.view(B, 1, N, 3)), dim=(- 1))
if self.CONCAT_FEATS:
new_points = torch.cat([interpolated_points, points_2], dim=1)
new_points = self.projection(new_points)
return (xyz_2, new_points)
return (xyz_2, (interpolated_points + points_2)) | def forward(self, xyz_1, xyz_2, points_1, points_2):
"\n Input:\n M < N\n xyz_1: input points position data, [B, 3, M]\n xyz_2: input points position data, [B, 3, N]\n points_1: input points data, [B, C, M]\n points_2: input points data, [B, C, N]\n\n interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance\n\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n "
(B, input_dim, M) = list(points_1.size())
(B, output_dim, N) = list(points_2.size())
if self.CONCAT_FEATS:
pass
else:
points_1 = self.linear_1(points_1)
points_2 = self.linear_2(points_2)
dists = square_distance(xyz_2.transpose(1, 2), xyz_1.transpose(1, 2))
(dists, idx) = dists.sort(dim=(- 1))
(dists, idx) = (dists[(:, :, :self.k)], idx[(:, :, :self.k)])
dist_recip = (1.0 / (dists + 0.1))
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = (dist_recip / norm)
interpolated_points = torch.sum((grouping_operation_cuda(points_1, idx.int()) * weight.view(B, 1, N, 3)), dim=(- 1))
if self.CONCAT_FEATS:
new_points = torch.cat([interpolated_points, points_2], dim=1)
new_points = self.projection(new_points)
return (xyz_2, new_points)
return (xyz_2, (interpolated_points + points_2))<|docstring|>Input:
M < N
xyz_1: input points position data, [B, 3, M]
xyz_2: input points position data, [B, 3, N]
points_1: input points data, [B, C, M]
points_2: input points data, [B, C, N]
interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]<|endoftext|> |
be61df59bb2fd6bf1dc578bbf8d7748e162c71b9d6595575fc0eec91246df057 | def forward(self, input_p, input_x):
'\n input_p: B, 3, npoint\n input_x: B, in_dim, npoint\n '
(B, in_dim, npoint) = list(input_x.size())
n_sample = self.n_sample
k = min(n_sample, npoint)
if (not self.use_vector_attn):
h = self.nhead
input_p = input_p.permute([0, 2, 1])
self.register_buffer('in_xyz_map', input_p)
if (self.fps_rate is not None):
npoint = (npoint // self.fps_rate)
fps_idx = farthest_point_sample_cuda(input_p, npoint)
torch.cuda.empty_cache()
input_p_fps = index_points_cuda(input_p, fps_idx)
if self.SKIP_ALL:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = index_points_cuda(self.tmp_linear(input_x).transpose(1, 2), fps_idx).transpose(1, 2)
return (input_p_reduced, input_x_reduced)
else:
input_p_fps = input_p
input_x_fps = input_x
res = input_x
if self.USE_KNN:
self.knn = KNN(k=k, transpose_mode=True)
(_, idx) = self.knn(input_p.contiguous(), input_p_fps.contiguous())
idx = idx.int()
else:
idx = query_ball_point_cuda(self.radius, k, input_p.contiguous(), input_p_fps.contiguous())
grouped_input_p = grouping_operation_cuda(input_p.transpose(1, 2).contiguous(), idx)
grouped_input_x = grouping_operation_cuda(input_x.contiguous(), idx)
self.register_buffer('neighbor_map', idx)
if (self.fps_rate is not None):
if self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
elif self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
if self.SKIP_ATTN:
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.CAT_POS:
alpha = self.alpha(torch.cat([grouped_input_x, relative_xyz], dim=1))
else:
alpha = self.alpha((grouped_input_x + pos_encoding))
else:
alpha = self.alpha(grouped_input_x)
y = alpha.max(dim=(- 1))[0]
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced)
if (self.fps_rate is not None):
input_x_fps = index_points_cuda(input_x.transpose(1, 2), fps_idx).transpose(1, 2)
phi = self.phi(input_x_fps)
else:
phi = self.phi(input_x)
phi = phi[(:, :, :, None)].repeat(1, 1, 1, k)
psi = grouping_operation_cuda(self.psi(input_x).contiguous(), idx)
self.skip_knn = True
alpha = grouping_operation_cuda(self.alpha(input_x).contiguous(), idx)
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.use_vector_attn:
if self.POS_ENCODING:
assert ((self.V_POS_ONLY and self.QK_POS_ONLY) is False)
if self.V_POS_ONLY:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
else:
attn_map = F.softmax(self.gamma(((phi - psi) + pos_encoding)), dim=(- 1))
if self.QK_POS_ONLY:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
else:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * (alpha + pos_encoding))
else:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
if self.MAX_POOL:
y = y.max(dim=(- 1))[0]
else:
y = y.sum(dim=(- 1))
else:
assert (self.POS_ENCODING == True)
phi = phi.reshape(B, h, (self.out_dim // h), npoint, k)
psi = psi.reshape(B, h, (self.out_dim // h), npoint, k)
attn_map = F.softmax(((phi * psi).reshape(B, self.out_dim, npoint, k) + pos_encoding), dim=(- 1))
y = (attn_map * (alpha + pos_encoding))
y = y.sum(dim=(- 1))
self.register_buffer('attn_map', attn_map.mean(dim=1))
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced) | input_p: B, 3, npoint
input_x: B, in_dim, npoint | models_dev/pct_utils.py | forward | A-suozhang/SpatioTemporalSegmentation-ScanNet | 1 | python | def forward(self, input_p, input_x):
'\n input_p: B, 3, npoint\n input_x: B, in_dim, npoint\n '
(B, in_dim, npoint) = list(input_x.size())
n_sample = self.n_sample
k = min(n_sample, npoint)
if (not self.use_vector_attn):
h = self.nhead
input_p = input_p.permute([0, 2, 1])
self.register_buffer('in_xyz_map', input_p)
if (self.fps_rate is not None):
npoint = (npoint // self.fps_rate)
fps_idx = farthest_point_sample_cuda(input_p, npoint)
torch.cuda.empty_cache()
input_p_fps = index_points_cuda(input_p, fps_idx)
if self.SKIP_ALL:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = index_points_cuda(self.tmp_linear(input_x).transpose(1, 2), fps_idx).transpose(1, 2)
return (input_p_reduced, input_x_reduced)
else:
input_p_fps = input_p
input_x_fps = input_x
res = input_x
if self.USE_KNN:
self.knn = KNN(k=k, transpose_mode=True)
(_, idx) = self.knn(input_p.contiguous(), input_p_fps.contiguous())
idx = idx.int()
else:
idx = query_ball_point_cuda(self.radius, k, input_p.contiguous(), input_p_fps.contiguous())
grouped_input_p = grouping_operation_cuda(input_p.transpose(1, 2).contiguous(), idx)
grouped_input_x = grouping_operation_cuda(input_x.contiguous(), idx)
self.register_buffer('neighbor_map', idx)
if (self.fps_rate is not None):
if self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
elif self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
if self.SKIP_ATTN:
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.CAT_POS:
alpha = self.alpha(torch.cat([grouped_input_x, relative_xyz], dim=1))
else:
alpha = self.alpha((grouped_input_x + pos_encoding))
else:
alpha = self.alpha(grouped_input_x)
y = alpha.max(dim=(- 1))[0]
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced)
if (self.fps_rate is not None):
input_x_fps = index_points_cuda(input_x.transpose(1, 2), fps_idx).transpose(1, 2)
phi = self.phi(input_x_fps)
else:
phi = self.phi(input_x)
phi = phi[(:, :, :, None)].repeat(1, 1, 1, k)
psi = grouping_operation_cuda(self.psi(input_x).contiguous(), idx)
self.skip_knn = True
alpha = grouping_operation_cuda(self.alpha(input_x).contiguous(), idx)
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.use_vector_attn:
if self.POS_ENCODING:
assert ((self.V_POS_ONLY and self.QK_POS_ONLY) is False)
if self.V_POS_ONLY:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
else:
attn_map = F.softmax(self.gamma(((phi - psi) + pos_encoding)), dim=(- 1))
if self.QK_POS_ONLY:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
else:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * (alpha + pos_encoding))
else:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
if self.MAX_POOL:
y = y.max(dim=(- 1))[0]
else:
y = y.sum(dim=(- 1))
else:
assert (self.POS_ENCODING == True)
phi = phi.reshape(B, h, (self.out_dim // h), npoint, k)
psi = psi.reshape(B, h, (self.out_dim // h), npoint, k)
attn_map = F.softmax(((phi * psi).reshape(B, self.out_dim, npoint, k) + pos_encoding), dim=(- 1))
y = (attn_map * (alpha + pos_encoding))
y = y.sum(dim=(- 1))
self.register_buffer('attn_map', attn_map.mean(dim=1))
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced) | def forward(self, input_p, input_x):
'\n input_p: B, 3, npoint\n input_x: B, in_dim, npoint\n '
(B, in_dim, npoint) = list(input_x.size())
n_sample = self.n_sample
k = min(n_sample, npoint)
if (not self.use_vector_attn):
h = self.nhead
input_p = input_p.permute([0, 2, 1])
self.register_buffer('in_xyz_map', input_p)
if (self.fps_rate is not None):
npoint = (npoint // self.fps_rate)
fps_idx = farthest_point_sample_cuda(input_p, npoint)
torch.cuda.empty_cache()
input_p_fps = index_points_cuda(input_p, fps_idx)
if self.SKIP_ALL:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = index_points_cuda(self.tmp_linear(input_x).transpose(1, 2), fps_idx).transpose(1, 2)
return (input_p_reduced, input_x_reduced)
else:
input_p_fps = input_p
input_x_fps = input_x
res = input_x
if self.USE_KNN:
self.knn = KNN(k=k, transpose_mode=True)
(_, idx) = self.knn(input_p.contiguous(), input_p_fps.contiguous())
idx = idx.int()
else:
idx = query_ball_point_cuda(self.radius, k, input_p.contiguous(), input_p_fps.contiguous())
grouped_input_p = grouping_operation_cuda(input_p.transpose(1, 2).contiguous(), idx)
grouped_input_x = grouping_operation_cuda(input_x.contiguous(), idx)
self.register_buffer('neighbor_map', idx)
if (self.fps_rate is not None):
if self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
elif self.SKIP_ATTN:
pass
else:
input_x = self.linear_top(input_x)
if self.SKIP_ATTN:
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.CAT_POS:
alpha = self.alpha(torch.cat([grouped_input_x, relative_xyz], dim=1))
else:
alpha = self.alpha((grouped_input_x + pos_encoding))
else:
alpha = self.alpha(grouped_input_x)
y = alpha.max(dim=(- 1))[0]
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced)
if (self.fps_rate is not None):
input_x_fps = index_points_cuda(input_x.transpose(1, 2), fps_idx).transpose(1, 2)
phi = self.phi(input_x_fps)
else:
phi = self.phi(input_x)
phi = phi[(:, :, :, None)].repeat(1, 1, 1, k)
psi = grouping_operation_cuda(self.psi(input_x).contiguous(), idx)
self.skip_knn = True
alpha = grouping_operation_cuda(self.alpha(input_x).contiguous(), idx)
if self.POS_ENCODING:
relative_xyz = (input_p_fps.permute([0, 2, 1])[(:, :, :, None)] - grouped_input_p)
pos_encoding = self.delta(relative_xyz)
if self.use_vector_attn:
if self.POS_ENCODING:
assert ((self.V_POS_ONLY and self.QK_POS_ONLY) is False)
if self.V_POS_ONLY:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
else:
attn_map = F.softmax(self.gamma(((phi - psi) + pos_encoding)), dim=(- 1))
if self.QK_POS_ONLY:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
else:
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * (alpha + pos_encoding))
else:
attn_map = F.softmax(self.gamma((phi - psi)), dim=(- 1))
y = (attn_map.repeat(1, (self.out_dim // self.vector_dim), 1, 1) * alpha)
if self.MAX_POOL:
y = y.max(dim=(- 1))[0]
else:
y = y.sum(dim=(- 1))
else:
assert (self.POS_ENCODING == True)
phi = phi.reshape(B, h, (self.out_dim // h), npoint, k)
psi = psi.reshape(B, h, (self.out_dim // h), npoint, k)
attn_map = F.softmax(((phi * psi).reshape(B, self.out_dim, npoint, k) + pos_encoding), dim=(- 1))
y = (attn_map * (alpha + pos_encoding))
y = y.sum(dim=(- 1))
self.register_buffer('attn_map', attn_map.mean(dim=1))
y = self.linear_down(y)
if (self.fps_rate is not None):
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = y
return (input_p_reduced, input_x_reduced)
else:
input_p_reduced = input_p_fps.transpose(1, 2)
input_x_reduced = (y + res)
return (input_p_reduced, input_x_reduced)<|docstring|>input_p: B, 3, npoint
input_x: B, in_dim, npoint<|endoftext|> |
f6093df8b5e98f14fc609651ec46b38bba6b7fe9fd7eac33e565335a5c801be0 | def update_resource_specs():
' Update Resource Specs '
for (region, url) in SPEC_REGIONS.items():
filename = pkg_resources.resource_filename(__name__, ('/data/CloudSpecs/%s.json' % region))
LOGGER.debug('Downloading template %s into %s', url, filename)
spec = json.loads(get_url_content(url))
spec = patch_spec(spec, 'all')
spec = patch_spec(spec, region)
with open(filename, 'w') as f:
json.dump(spec, f, indent=2, sort_keys=True, separators=(',', ': ')) | Update Resource Specs | src/cfnlint/maintenance.py | update_resource_specs | kylelaker/cfn-python-lint | 1 | python | def update_resource_specs():
' '
for (region, url) in SPEC_REGIONS.items():
filename = pkg_resources.resource_filename(__name__, ('/data/CloudSpecs/%s.json' % region))
LOGGER.debug('Downloading template %s into %s', url, filename)
spec = json.loads(get_url_content(url))
spec = patch_spec(spec, 'all')
spec = patch_spec(spec, region)
with open(filename, 'w') as f:
json.dump(spec, f, indent=2, sort_keys=True, separators=(',', ': ')) | def update_resource_specs():
' '
for (region, url) in SPEC_REGIONS.items():
filename = pkg_resources.resource_filename(__name__, ('/data/CloudSpecs/%s.json' % region))
LOGGER.debug('Downloading template %s into %s', url, filename)
spec = json.loads(get_url_content(url))
spec = patch_spec(spec, 'all')
spec = patch_spec(spec, region)
with open(filename, 'w') as f:
json.dump(spec, f, indent=2, sort_keys=True, separators=(',', ': '))<|docstring|>Update Resource Specs<|endoftext|> |
31131f27f5a5a6d546dff3885f5da60fdd97ebcc1c266da7da2553aa17361066 | def update_documentation(rules):
'Generate documentation'
filename = 'docs/rules.md'
sorted_rules = sorted(rules, key=(lambda obj: obj.id))
data = []
with open(filename, 'r') as origial_file:
line = origial_file.readline()
while line:
data.append(line)
if (line == '## Rules\n'):
break
line = origial_file.readline()
with open(filename, 'w') as new_file:
for line in data:
new_file.write(line)
new_file.write('The following **{}** rules are applied by this linter:\n'.format(len(sorted_rules)))
new_file.write('(_This documentation is generated from the Rules, do not alter this manually_)\n\n')
new_file.write('| Rule ID | Title | Description | Config<br />(Name:Type:Default) | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ---------- | ------ | ---- |\n')
rule_output = '| {0}<a name="{0}"></a> | {1} | {2} | {3} | [Source]({4}) | {5} |\n'
parseerror = cfnlint.rules.ParseError()
tags = ','.join(('`{0}`'.format(tag) for tag in parseerror.tags))
new_file.write(rule_output.format(parseerror.id, parseerror.shortdesc, parseerror.description, '', '', tags))
transformerror = cfnlint.rules.TransformError()
tags = ','.join(('`{0}`'.format(tag) for tag in transformerror.tags))
new_file.write(rule_output.format(transformerror.id, transformerror.shortdesc, transformerror.description, '', '', tags))
ruleerror = cfnlint.rules.RuleError()
tags = ','.join(('`{0}`'.format(tag) for tag in ruleerror.tags))
new_file.write(rule_output.format(ruleerror.id, ruleerror.shortdesc, ruleerror.description, '', '', tags))
experimental_rules = []
for rule in sorted_rules:
if rule.experimental:
experimental_rules.append(rule)
continue
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags))
if experimental_rules:
new_file.write('### Experimental rules\n')
new_file.write('| Rule ID | Title | Description | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ------ | ---- |\n')
for rule in experimental_rules:
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags)) | Generate documentation | src/cfnlint/maintenance.py | update_documentation | kylelaker/cfn-python-lint | 1 | python | def update_documentation(rules):
filename = 'docs/rules.md'
sorted_rules = sorted(rules, key=(lambda obj: obj.id))
data = []
with open(filename, 'r') as origial_file:
line = origial_file.readline()
while line:
data.append(line)
if (line == '## Rules\n'):
break
line = origial_file.readline()
with open(filename, 'w') as new_file:
for line in data:
new_file.write(line)
new_file.write('The following **{}** rules are applied by this linter:\n'.format(len(sorted_rules)))
new_file.write('(_This documentation is generated from the Rules, do not alter this manually_)\n\n')
new_file.write('| Rule ID | Title | Description | Config<br />(Name:Type:Default) | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ---------- | ------ | ---- |\n')
rule_output = '| {0}<a name="{0}"></a> | {1} | {2} | {3} | [Source]({4}) | {5} |\n'
parseerror = cfnlint.rules.ParseError()
tags = ','.join(('`{0}`'.format(tag) for tag in parseerror.tags))
new_file.write(rule_output.format(parseerror.id, parseerror.shortdesc, parseerror.description, , , tags))
transformerror = cfnlint.rules.TransformError()
tags = ','.join(('`{0}`'.format(tag) for tag in transformerror.tags))
new_file.write(rule_output.format(transformerror.id, transformerror.shortdesc, transformerror.description, , , tags))
ruleerror = cfnlint.rules.RuleError()
tags = ','.join(('`{0}`'.format(tag) for tag in ruleerror.tags))
new_file.write(rule_output.format(ruleerror.id, ruleerror.shortdesc, ruleerror.description, , , tags))
experimental_rules = []
for rule in sorted_rules:
if rule.experimental:
experimental_rules.append(rule)
continue
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags))
if experimental_rules:
new_file.write('### Experimental rules\n')
new_file.write('| Rule ID | Title | Description | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ------ | ---- |\n')
for rule in experimental_rules:
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags)) | def update_documentation(rules):
filename = 'docs/rules.md'
sorted_rules = sorted(rules, key=(lambda obj: obj.id))
data = []
with open(filename, 'r') as origial_file:
line = origial_file.readline()
while line:
data.append(line)
if (line == '## Rules\n'):
break
line = origial_file.readline()
with open(filename, 'w') as new_file:
for line in data:
new_file.write(line)
new_file.write('The following **{}** rules are applied by this linter:\n'.format(len(sorted_rules)))
new_file.write('(_This documentation is generated from the Rules, do not alter this manually_)\n\n')
new_file.write('| Rule ID | Title | Description | Config<br />(Name:Type:Default) | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ---------- | ------ | ---- |\n')
rule_output = '| {0}<a name="{0}"></a> | {1} | {2} | {3} | [Source]({4}) | {5} |\n'
parseerror = cfnlint.rules.ParseError()
tags = ','.join(('`{0}`'.format(tag) for tag in parseerror.tags))
new_file.write(rule_output.format(parseerror.id, parseerror.shortdesc, parseerror.description, , , tags))
transformerror = cfnlint.rules.TransformError()
tags = ','.join(('`{0}`'.format(tag) for tag in transformerror.tags))
new_file.write(rule_output.format(transformerror.id, transformerror.shortdesc, transformerror.description, , , tags))
ruleerror = cfnlint.rules.RuleError()
tags = ','.join(('`{0}`'.format(tag) for tag in ruleerror.tags))
new_file.write(rule_output.format(ruleerror.id, ruleerror.shortdesc, ruleerror.description, , , tags))
experimental_rules = []
for rule in sorted_rules:
if rule.experimental:
experimental_rules.append(rule)
continue
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags))
if experimental_rules:
new_file.write('### Experimental rules\n')
new_file.write('| Rule ID | Title | Description | Source | Tags |\n')
new_file.write('| -------- | ----- | ----------- | ------ | ---- |\n')
for rule in experimental_rules:
tags = ','.join(('`{0}`'.format(tag) for tag in rule.tags))
config = '<br />'.join(('{0}:{1}:{2}'.format(key, values.get('type'), values.get('default')) for (key, values) in rule.config_definition.items()))
new_file.write(rule_output.format(rule.id, rule.shortdesc, rule.description, config, rule.source_url, tags))<|docstring|>Generate documentation<|endoftext|> |
ab11f652479f1a71fe864b93ed7acb28fa6bce5fdd930ce9b8853b0e8c5c6d9d | def patch_spec(content, region):
'Patch the spec file'
LOGGER.info('Patching spec file for region "%s"', region)
append_dir = os.path.join(os.path.dirname(__file__), 'data', 'ExtendedSpecs', region)
for (dirpath, _, filenames) in os.walk(append_dir):
filenames.sort()
for filename in fnmatch.filter(filenames, '*.json'):
file_path = os.path.join(dirpath, filename).replace(append_dir, '')
LOGGER.info('Processing %s%s', region, file_path)
all_patches = jsonpatch.JsonPatch(cfnlint.helpers.load_resources('data/ExtendedSpecs/{}{}'.format(region, file_path)))
for all_patch in all_patches:
try:
jsonpatch.JsonPatch([all_patch]).apply(content, in_place=True)
except jsonpatch.JsonPatchConflict:
LOGGER.debug('Patch (%s) not applied in region %s', all_patch, region)
except jsonpointer.JsonPointerException:
LOGGER.debug('Parent element not found for patch (%s) in region %s', all_patch, region)
return content | Patch the spec file | src/cfnlint/maintenance.py | patch_spec | kylelaker/cfn-python-lint | 1 | python | def patch_spec(content, region):
LOGGER.info('Patching spec file for region "%s"', region)
append_dir = os.path.join(os.path.dirname(__file__), 'data', 'ExtendedSpecs', region)
for (dirpath, _, filenames) in os.walk(append_dir):
filenames.sort()
for filename in fnmatch.filter(filenames, '*.json'):
file_path = os.path.join(dirpath, filename).replace(append_dir, )
LOGGER.info('Processing %s%s', region, file_path)
all_patches = jsonpatch.JsonPatch(cfnlint.helpers.load_resources('data/ExtendedSpecs/{}{}'.format(region, file_path)))
for all_patch in all_patches:
try:
jsonpatch.JsonPatch([all_patch]).apply(content, in_place=True)
except jsonpatch.JsonPatchConflict:
LOGGER.debug('Patch (%s) not applied in region %s', all_patch, region)
except jsonpointer.JsonPointerException:
LOGGER.debug('Parent element not found for patch (%s) in region %s', all_patch, region)
return content | def patch_spec(content, region):
LOGGER.info('Patching spec file for region "%s"', region)
append_dir = os.path.join(os.path.dirname(__file__), 'data', 'ExtendedSpecs', region)
for (dirpath, _, filenames) in os.walk(append_dir):
filenames.sort()
for filename in fnmatch.filter(filenames, '*.json'):
file_path = os.path.join(dirpath, filename).replace(append_dir, )
LOGGER.info('Processing %s%s', region, file_path)
all_patches = jsonpatch.JsonPatch(cfnlint.helpers.load_resources('data/ExtendedSpecs/{}{}'.format(region, file_path)))
for all_patch in all_patches:
try:
jsonpatch.JsonPatch([all_patch]).apply(content, in_place=True)
except jsonpatch.JsonPatchConflict:
LOGGER.debug('Patch (%s) not applied in region %s', all_patch, region)
except jsonpointer.JsonPointerException:
LOGGER.debug('Parent element not found for patch (%s) in region %s', all_patch, region)
return content<|docstring|>Patch the spec file<|endoftext|> |
be1ad0d956bda70a531b4c9a2371bb84c7cf25db45ad53491deac7f53e56eaac | def update_iam_policies():
'update iam policies file'
url = 'https://awspolicygen.s3.amazonaws.com/js/policies.js'
filename = pkg_resources.resource_filename(__name__, '/data/AdditionalSpecs/Policies.json')
LOGGER.debug('Downloading policies %s into %s', url, filename)
content = get_url_content(url)
content = content.split('app.PolicyEditorConfig=')[1]
content = json.loads(content)
content['serviceMap']['Manage Amazon API Gateway']['Actions'].extend(['HEAD', 'OPTIONS'])
content['serviceMap']['Amazon Kinesis Video Streams']['Actions'].append('StartStreamEncryption')
with open(filename, 'w') as f:
json.dump(content, f, indent=2, sort_keys=True, separators=(',', ': ')) | update iam policies file | src/cfnlint/maintenance.py | update_iam_policies | kylelaker/cfn-python-lint | 1 | python | def update_iam_policies():
url = 'https://awspolicygen.s3.amazonaws.com/js/policies.js'
filename = pkg_resources.resource_filename(__name__, '/data/AdditionalSpecs/Policies.json')
LOGGER.debug('Downloading policies %s into %s', url, filename)
content = get_url_content(url)
content = content.split('app.PolicyEditorConfig=')[1]
content = json.loads(content)
content['serviceMap']['Manage Amazon API Gateway']['Actions'].extend(['HEAD', 'OPTIONS'])
content['serviceMap']['Amazon Kinesis Video Streams']['Actions'].append('StartStreamEncryption')
with open(filename, 'w') as f:
json.dump(content, f, indent=2, sort_keys=True, separators=(',', ': ')) | def update_iam_policies():
url = 'https://awspolicygen.s3.amazonaws.com/js/policies.js'
filename = pkg_resources.resource_filename(__name__, '/data/AdditionalSpecs/Policies.json')
LOGGER.debug('Downloading policies %s into %s', url, filename)
content = get_url_content(url)
content = content.split('app.PolicyEditorConfig=')[1]
content = json.loads(content)
content['serviceMap']['Manage Amazon API Gateway']['Actions'].extend(['HEAD', 'OPTIONS'])
content['serviceMap']['Amazon Kinesis Video Streams']['Actions'].append('StartStreamEncryption')
with open(filename, 'w') as f:
json.dump(content, f, indent=2, sort_keys=True, separators=(',', ': '))<|docstring|>update iam policies file<|endoftext|> |
c55678297c13943e93755eb5c0965fde9b0da93f8bb07d333802a03c5e85c702 | def launch_ec(self, argStr='', input='', flag=''):
'Dispatches command to ec (loaded as a module).\n '
command = 'python -m ec tests/targets/simple.py'
if flag:
command += (' %s' % flag)
if argStr:
command += (' %s' % argStr)
return shell_exec(command, input=input) | Dispatches command to ec (loaded as a module). | tests/test_module_launch.py | launch_ec | Laufire/ec | 2 | python | def launch_ec(self, argStr=, input=, flag=):
'\n '
command = 'python -m ec tests/targets/simple.py'
if flag:
command += (' %s' % flag)
if argStr:
command += (' %s' % argStr)
return shell_exec(command, input=input) | def launch_ec(self, argStr=, input=, flag=):
'\n '
command = 'python -m ec tests/targets/simple.py'
if flag:
command += (' %s' % flag)
if argStr:
command += (' %s' % argStr)
return shell_exec(command, input=input)<|docstring|>Dispatches command to ec (loaded as a module).<|endoftext|> |
1708a75b33f644cae84364e7f120b0ad2d3a0784b0e4b5498fbc87961adb8d3c | def adjust_learning_rate(optimizer, epoch, args):
'Sets the learning rate to the initial LR decayed by 10 every 30 epochs'
lr = (args.lr * (0.1 ** (epoch // 30)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr | Sets the learning rate to the initial LR decayed by 10 every 30 epochs | src/legacy/experiment.py | adjust_learning_rate | spencerpomme/GSPNet | 0 | python | def adjust_learning_rate(optimizer, epoch, args):
lr = (args.lr * (0.1 ** (epoch // 30)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr | def adjust_learning_rate(optimizer, epoch, args):
lr = (args.lr * (0.1 ** (epoch // 30)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr<|docstring|>Sets the learning rate to the initial LR decayed by 10 every 30 epochs<|endoftext|> |
9ac58a8c4ae439d17a2757c8892bef929aedaf58ff23f1ab2a5e08670d55f6f2 | def accuracy(output, target, topk=(1,)):
'Computes the accuracy over the k top predictions for the specified values of k'
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res | Computes the accuracy over the k top predictions for the specified values of k | src/legacy/experiment.py | accuracy | spencerpomme/GSPNet | 0 | python | def accuracy(output, target, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res | def accuracy(output, target, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res<|docstring|>Computes the accuracy over the k top predictions for the specified values of k<|endoftext|> |
71ad4f0b87dc21dad6857779ebf3c2e24046f629824b93447a5988cf6c009215 | def main(args):
'Entry point.\n \n :param args: Parsed CLI arguments.\n\n '
network_id = factory.create_network_id(args.network)
network = cache.infra.get_network(network_id)
if (network is None):
raise ValueError('Unregistered network.')
if (network.faucet is None):
raise ValueError('Unregistered network faucet.')
account = network.faucet
for contract in clx.contracts.CONTRACTS_BY_HASH:
_install_contract(network, account, contract)
utils.log(f'client contracts for network {args.network} were successfully installed') | Entry point.
:param args: Parsed CLI arguments. | sh/scripts/chain_set_contracts.py | main | dwerner/stests | 4 | python | def main(args):
'Entry point.\n \n :param args: Parsed CLI arguments.\n\n '
network_id = factory.create_network_id(args.network)
network = cache.infra.get_network(network_id)
if (network is None):
raise ValueError('Unregistered network.')
if (network.faucet is None):
raise ValueError('Unregistered network faucet.')
account = network.faucet
for contract in clx.contracts.CONTRACTS_BY_HASH:
_install_contract(network, account, contract)
utils.log(f'client contracts for network {args.network} were successfully installed') | def main(args):
'Entry point.\n \n :param args: Parsed CLI arguments.\n\n '
network_id = factory.create_network_id(args.network)
network = cache.infra.get_network(network_id)
if (network is None):
raise ValueError('Unregistered network.')
if (network.faucet is None):
raise ValueError('Unregistered network faucet.')
account = network.faucet
for contract in clx.contracts.CONTRACTS_BY_HASH:
_install_contract(network, account, contract)
utils.log(f'client contracts for network {args.network} were successfully installed')<|docstring|>Entry point.
:param args: Parsed CLI arguments.<|endoftext|> |
bb233fe04a7a4d3ab45281e0456c617b7291b32e5d555a2dc51bf630e1af5d6d | def _install_contract(network: Network, account: Account, contract: typing.Callable):
'Installs a smart contract upon target network.\n \n '
utils.log(f'{contract.WASM} :: installation starts ... please wait')
(node, deploy_hash, _, _) = contract.install(network, account)
utils.log(f'{contract.WASM} :: deploy dispatched >- {deploy_hash}')
block_hash = clx.await_deploy_processing(node, deploy_hash)
utils.log(f'{contract.WASM} :: deploy processed at block {block_hash}')
keys = clx.contracts.get_named_keys(node, account, block_hash, contract.NKEYS)
for (key_name, key_hash) in keys:
cache.infra.set_named_key(factory.create_named_key(account, contract.TYPE, key_name, key_hash))
utils.log(f'{contract.WASM} :: named key -> {key_hash} : {key_name}') | Installs a smart contract upon target network. | sh/scripts/chain_set_contracts.py | _install_contract | dwerner/stests | 4 | python | def _install_contract(network: Network, account: Account, contract: typing.Callable):
'\n \n '
utils.log(f'{contract.WASM} :: installation starts ... please wait')
(node, deploy_hash, _, _) = contract.install(network, account)
utils.log(f'{contract.WASM} :: deploy dispatched >- {deploy_hash}')
block_hash = clx.await_deploy_processing(node, deploy_hash)
utils.log(f'{contract.WASM} :: deploy processed at block {block_hash}')
keys = clx.contracts.get_named_keys(node, account, block_hash, contract.NKEYS)
for (key_name, key_hash) in keys:
cache.infra.set_named_key(factory.create_named_key(account, contract.TYPE, key_name, key_hash))
utils.log(f'{contract.WASM} :: named key -> {key_hash} : {key_name}') | def _install_contract(network: Network, account: Account, contract: typing.Callable):
'\n \n '
utils.log(f'{contract.WASM} :: installation starts ... please wait')
(node, deploy_hash, _, _) = contract.install(network, account)
utils.log(f'{contract.WASM} :: deploy dispatched >- {deploy_hash}')
block_hash = clx.await_deploy_processing(node, deploy_hash)
utils.log(f'{contract.WASM} :: deploy processed at block {block_hash}')
keys = clx.contracts.get_named_keys(node, account, block_hash, contract.NKEYS)
for (key_name, key_hash) in keys:
cache.infra.set_named_key(factory.create_named_key(account, contract.TYPE, key_name, key_hash))
utils.log(f'{contract.WASM} :: named key -> {key_hash} : {key_name}')<|docstring|>Installs a smart contract upon target network.<|endoftext|> |
12b59bf7d9e91a40668f7211c50f08075ff986b7796706ecea143141a3a67c99 | def pay_for_fa_course(self, course_id, status=Order.FULFILLED):
'\n Helper function to pay for a financial aid course\n '
order = OrderFactory.create(user=self.user, status=status)
return LineFactory.create(order=order, course_key=course_id) | Helper function to pay for a financial aid course | dashboard/utils_test.py | pay_for_fa_course | mitodl/micromasters | 32 | python | def pay_for_fa_course(self, course_id, status=Order.FULFILLED):
'\n \n '
order = OrderFactory.create(user=self.user, status=status)
return LineFactory.create(order=order, course_key=course_id) | def pay_for_fa_course(self, course_id, status=Order.FULFILLED):
'\n \n '
order = OrderFactory.create(user=self.user, status=status)
return LineFactory.create(order=order, course_key=course_id)<|docstring|>Helper function to pay for a financial aid course<|endoftext|> |
43cee51af1771bc2942a7b3200af301723d868d0c4cd8f2304947686e0d3f05e | def test_init_normal_track(self):
'\n Test of the init of the class for programs without financial aid\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program.financial_aid_availability)
assert (mmtrack.edx_course_keys == {'course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1', 'course-v1:odl+FOO102+CR-FALL16'})
assert (mmtrack.paid_course_fa == {}) | Test of the init of the class for programs without financial aid | dashboard/utils_test.py | test_init_normal_track | mitodl/micromasters | 32 | python | def test_init_normal_track(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program.financial_aid_availability)
assert (mmtrack.edx_course_keys == {'course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1', 'course-v1:odl+FOO102+CR-FALL16'})
assert (mmtrack.paid_course_fa == {}) | def test_init_normal_track(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program.financial_aid_availability)
assert (mmtrack.edx_course_keys == {'course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1', 'course-v1:odl+FOO102+CR-FALL16'})
assert (mmtrack.paid_course_fa == {})<|docstring|>Test of the init of the class for programs without financial aid<|endoftext|> |
95fc4f9d2bd1432ab6c4c985cfb24abea9895131d376fa115ac82d10adc10c35 | def test_init_financial_aid_track(self):
'\n Test of the init of the class for programs with financial aid\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program_financial_aid)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program_financial_aid.financial_aid_availability)
assert (mmtrack.edx_course_keys == {self.crun_fa.edx_course_key, self.crun_fa2.edx_course_key})
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False}) | Test of the init of the class for programs with financial aid | dashboard/utils_test.py | test_init_financial_aid_track | mitodl/micromasters | 32 | python | def test_init_financial_aid_track(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program_financial_aid)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program_financial_aid.financial_aid_availability)
assert (mmtrack.edx_course_keys == {self.crun_fa.edx_course_key, self.crun_fa2.edx_course_key})
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False}) | def test_init_financial_aid_track(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.user == self.user)
assert (mmtrack.program == self.program_financial_aid)
assert (mmtrack.enrollments == self.cached_edx_user_data.enrollments)
assert (mmtrack.current_grades == self.cached_edx_user_data.current_grades)
assert (mmtrack.certificates == self.cached_edx_user_data.certificates)
assert (mmtrack.financial_aid_available == self.program_financial_aid.financial_aid_availability)
assert (mmtrack.edx_course_keys == {self.crun_fa.edx_course_key, self.crun_fa2.edx_course_key})
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False})<|docstring|>Test of the init of the class for programs with financial aid<|endoftext|> |
bfbf386aea09a3d98742b78ffd58b9f6df568af2b53eb59e28975fddb5eddbb8 | @ddt.data(Order.FULFILLED, Order.PARTIALLY_REFUNDED)
def test_fa_paid(self, order_status):
'\n Test that for financial aid, mmtrack.paid_course_ids only apply to the user with a matching Order\n '
key = 'course-v1:odl+FOO101+CR-FALL15'
self.pay_for_fa_course(key, status=order_status)
mmtrack_paid = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_paid.paid_course_fa == {self.crun_fa.course.id: True})
mmtrack = MMTrack(user=UserFactory.create(), program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False}) | Test that for financial aid, mmtrack.paid_course_ids only apply to the user with a matching Order | dashboard/utils_test.py | test_fa_paid | mitodl/micromasters | 32 | python | @ddt.data(Order.FULFILLED, Order.PARTIALLY_REFUNDED)
def test_fa_paid(self, order_status):
'\n \n '
key = 'course-v1:odl+FOO101+CR-FALL15'
self.pay_for_fa_course(key, status=order_status)
mmtrack_paid = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_paid.paid_course_fa == {self.crun_fa.course.id: True})
mmtrack = MMTrack(user=UserFactory.create(), program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False}) | @ddt.data(Order.FULFILLED, Order.PARTIALLY_REFUNDED)
def test_fa_paid(self, order_status):
'\n \n '
key = 'course-v1:odl+FOO101+CR-FALL15'
self.pay_for_fa_course(key, status=order_status)
mmtrack_paid = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_paid.paid_course_fa == {self.crun_fa.course.id: True})
mmtrack = MMTrack(user=UserFactory.create(), program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.paid_course_fa == {self.crun_fa.course.id: False})<|docstring|>Test that for financial aid, mmtrack.paid_course_ids only apply to the user with a matching Order<|endoftext|> |
62a068db347ffaef22228baefc2490d367ede05fa5e28dd563c3adc95d269d91 | def test_is_course_in_program(self):
'\n Test the _is_course_in_program method\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack._is_course_in_program(course_id) is True)
assert (mmtrack._is_course_in_program('course-v1:odl+FOO101+CR-FALL15') is False) | Test the _is_course_in_program method | dashboard/utils_test.py | test_is_course_in_program | mitodl/micromasters | 32 | python | def test_is_course_in_program(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack._is_course_in_program(course_id) is True)
assert (mmtrack._is_course_in_program('course-v1:odl+FOO101+CR-FALL15') is False) | def test_is_course_in_program(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack._is_course_in_program(course_id) is True)
assert (mmtrack._is_course_in_program('course-v1:odl+FOO101+CR-FALL15') is False)<|docstring|>Test the _is_course_in_program method<|endoftext|> |
042c807dc92caa2f461c5cd269a38b728aa7ada9b4e0304d280bb6d617e53d8f | def test_is_enrolled(self):
'\n Tests for is_enrolled method\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack.is_enrolled(course_id) is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled(course_id) is False)
mmtrack_fa = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_fa.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is False) | Tests for is_enrolled method | dashboard/utils_test.py | test_is_enrolled | mitodl/micromasters | 32 | python | def test_is_enrolled(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack.is_enrolled(course_id) is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled(course_id) is False)
mmtrack_fa = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_fa.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is False) | def test_is_enrolled(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_id in ['course-v1:edX+DemoX+Demo_Course', 'course-v1:MITx+8.MechCX+2014_T1']:
assert (mmtrack.is_enrolled(course_id) is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled(course_id) is False)
mmtrack_fa = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack_fa.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is True)
with patch('edx_api.enrollments.models.Enrollments.is_enrolled_in', return_value=False):
assert (mmtrack.is_enrolled('course-v1:odl+FOO101+CR-FALL15') is False)<|docstring|>Tests for is_enrolled method<|endoftext|> |
ebad5c43254d3f711574ed7ded63491f767dd190c73a3d04070ce9ef1d20c67d | def test_is_enrolled_mmtrack_normal(self):
'\n Tests for the is_enrolled_mmtrack method in case financial aid is not available\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack('course-v1:edX+DemoX+Demo_Course') is True)
assert (mmtrack.is_enrolled_mmtrack('course-v1:MITx+8.MechCX+2014_T1') is False) | Tests for the is_enrolled_mmtrack method in case financial aid is not available | dashboard/utils_test.py | test_is_enrolled_mmtrack_normal | mitodl/micromasters | 32 | python | def test_is_enrolled_mmtrack_normal(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack('course-v1:edX+DemoX+Demo_Course') is True)
assert (mmtrack.is_enrolled_mmtrack('course-v1:MITx+8.MechCX+2014_T1') is False) | def test_is_enrolled_mmtrack_normal(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack('course-v1:edX+DemoX+Demo_Course') is True)
assert (mmtrack.is_enrolled_mmtrack('course-v1:MITx+8.MechCX+2014_T1') is False)<|docstring|>Tests for the is_enrolled_mmtrack method in case financial aid is not available<|endoftext|> |
c8d66811996c320b3d582c2c00e2662d1225fd0f017f96a3a1a8cb52ab84e21a | def test_is_enrolled_mmtrack_fa(self):
'\n Tests for the is_enrolled_mmtrack method in case financial aid is available\n '
course_id = 'course-v1:odl+FOO101+CR-FALL15'
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is False)
self.pay_for_fa_course(course_id)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is True) | Tests for the is_enrolled_mmtrack method in case financial aid is available | dashboard/utils_test.py | test_is_enrolled_mmtrack_fa | mitodl/micromasters | 32 | python | def test_is_enrolled_mmtrack_fa(self):
'\n \n '
course_id = 'course-v1:odl+FOO101+CR-FALL15'
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is False)
self.pay_for_fa_course(course_id)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is True) | def test_is_enrolled_mmtrack_fa(self):
'\n \n '
course_id = 'course-v1:odl+FOO101+CR-FALL15'
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is False)
self.pay_for_fa_course(course_id)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.is_enrolled_mmtrack(course_id) is True)<|docstring|>Tests for the is_enrolled_mmtrack method in case financial aid is available<|endoftext|> |
5c041bc3ddba42e951c3f4e7948792f7d9819a1d0d3d55e5a426e1d6f92bf2c4 | @ddt.data(True, False)
def test_has_passed_course(self, final_grade_passed):
'\n Test that has_passed_course returns True when a passed FinalGrade exists\n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], passed=final_grade_passed)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course(final_grade.course_run.edx_course_key) is final_grade_passed) | Test that has_passed_course returns True when a passed FinalGrade exists | dashboard/utils_test.py | test_has_passed_course | mitodl/micromasters | 32 | python | @ddt.data(True, False)
def test_has_passed_course(self, final_grade_passed):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], passed=final_grade_passed)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course(final_grade.course_run.edx_course_key) is final_grade_passed) | @ddt.data(True, False)
def test_has_passed_course(self, final_grade_passed):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], passed=final_grade_passed)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course(final_grade.course_run.edx_course_key) is final_grade_passed)<|docstring|>Test that has_passed_course returns True when a passed FinalGrade exists<|endoftext|> |
41bce83233c438886611b8481b00503658a49e615b714a5ed5ba028e2099ec92 | def test_has_passed_course_no_grade(self):
'\n Test that has_passed_course returns False when no FinalGrade exists\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course('random-course-id') is False) | Test that has_passed_course returns False when no FinalGrade exists | dashboard/utils_test.py | test_has_passed_course_no_grade | mitodl/micromasters | 32 | python | def test_has_passed_course_no_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course('random-course-id') is False) | def test_has_passed_course_no_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_passed_course('random-course-id') is False)<|docstring|>Test that has_passed_course returns False when no FinalGrade exists<|endoftext|> |
486060798b24a52e41592e8da56bf18a92a58dad124b18f2b4eeb536daaeeded | def test_get_final_grade_percent(self):
'\n Test that get_final_grade_percent returns a final grade in percent form\n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], grade=0.57)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (round(mmtrack.get_final_grade_percent(final_grade.course_run.edx_course_key)) == 57.0) | Test that get_final_grade_percent returns a final grade in percent form | dashboard/utils_test.py | test_get_final_grade_percent | mitodl/micromasters | 32 | python | def test_get_final_grade_percent(self):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], grade=0.57)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (round(mmtrack.get_final_grade_percent(final_grade.course_run.edx_course_key)) == 57.0) | def test_get_final_grade_percent(self):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], grade=0.57)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (round(mmtrack.get_final_grade_percent(final_grade.course_run.edx_course_key)) == 57.0)<|docstring|>Test that get_final_grade_percent returns a final grade in percent form<|endoftext|> |
a91e5578dea60dcc213228a66bb57f83468675e60df75e2f261181c09e069aa7 | def test_get_final_grade_percent_none(self):
'\n Test that get_final_grade_percent returns a None when there is no final grade\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade_percent('random-course-id') is None) | Test that get_final_grade_percent returns a None when there is no final grade | dashboard/utils_test.py | test_get_final_grade_percent_none | mitodl/micromasters | 32 | python | def test_get_final_grade_percent_none(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade_percent('random-course-id') is None) | def test_get_final_grade_percent_none(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade_percent('random-course-id') is None)<|docstring|>Test that get_final_grade_percent returns a None when there is no final grade<|endoftext|> |
9b37bcef114f33f2fa8301ba7014ee1f462e561342179f1300809f7fbe3c1170 | def test_has_final_grade(self):
'\n Test that has_final_grade returns True when a FinalGrade exists\n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_final_grade(final_grade.course_run.edx_course_key) is True)
assert (mmtrack.has_final_grade('random-course-id') is False) | Test that has_final_grade returns True when a FinalGrade exists | dashboard/utils_test.py | test_has_final_grade | mitodl/micromasters | 32 | python | def test_has_final_grade(self):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_final_grade(final_grade.course_run.edx_course_key) is True)
assert (mmtrack.has_final_grade('random-course-id') is False) | def test_has_final_grade(self):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_final_grade(final_grade.course_run.edx_course_key) is True)
assert (mmtrack.has_final_grade('random-course-id') is False)<|docstring|>Test that has_final_grade returns True when a FinalGrade exists<|endoftext|> |
2350b7c3df962500c43d224683bb05e28cdc3adf227a623d6d8280e1623cbe9b | @ddt.data(True, False)
def test_has_paid_final_grade(self, has_paid):
'\n Test that has_paid_final_grade returns True when the associated FinalGrade is paid\n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], course_run_paid_on_edx=has_paid)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade(final_grade.course_run.edx_course_key) is has_paid) | Test that has_paid_final_grade returns True when the associated FinalGrade is paid | dashboard/utils_test.py | test_has_paid_final_grade | mitodl/micromasters | 32 | python | @ddt.data(True, False)
def test_has_paid_final_grade(self, has_paid):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], course_run_paid_on_edx=has_paid)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade(final_grade.course_run.edx_course_key) is has_paid) | @ddt.data(True, False)
def test_has_paid_final_grade(self, has_paid):
'\n \n '
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0], course_run_paid_on_edx=has_paid)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade(final_grade.course_run.edx_course_key) is has_paid)<|docstring|>Test that has_paid_final_grade returns True when the associated FinalGrade is paid<|endoftext|> |
b45b56bd90295e7e3027d272be2b73a2e96ed4eb9abf44b89d7aa0ed1fd9f211 | def test_has_paid_final_grade_none(self):
"\n Test that has_paid_final_grade returns False when a FinalGrade doesn't exist\n "
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade('random-course-id') is False) | Test that has_paid_final_grade returns False when a FinalGrade doesn't exist | dashboard/utils_test.py | test_has_paid_final_grade_none | mitodl/micromasters | 32 | python | def test_has_paid_final_grade_none(self):
"\n \n "
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade('random-course-id') is False) | def test_has_paid_final_grade_none(self):
"\n \n "
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_final_grade('random-course-id') is False)<|docstring|>Test that has_paid_final_grade returns False when a FinalGrade doesn't exist<|endoftext|> |
cee7cda375f822042b18678c4fbb4f19c19466e9293a6c7b6e2386aeb1c1608b | def test_get_final_grade(self):
"\n Test that get_final_grade returns the FinalGrade associated with a user's course run\n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade(final_grade.course_run.edx_course_key) == final_grade) | Test that get_final_grade returns the FinalGrade associated with a user's course run | dashboard/utils_test.py | test_get_final_grade | mitodl/micromasters | 32 | python | def test_get_final_grade(self):
"\n \n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade(final_grade.course_run.edx_course_key) == final_grade) | def test_get_final_grade(self):
"\n \n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade(final_grade.course_run.edx_course_key) == final_grade)<|docstring|>Test that get_final_grade returns the FinalGrade associated with a user's course run<|endoftext|> |
04a3d3948c765a3af68ce816d1aaaeee753d5b41f5bacf2c6ace6880f1675c4a | def test_get_final_grade_none(self):
'\n Test for get_final_grade returns None if there is no associated FinalGrade\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade('random-course-id') is None) | Test for get_final_grade returns None if there is no associated FinalGrade | dashboard/utils_test.py | test_get_final_grade_none | mitodl/micromasters | 32 | python | def test_get_final_grade_none(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade('random-course-id') is None) | def test_get_final_grade_none(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_final_grade('random-course-id') is None)<|docstring|>Test for get_final_grade returns None if there is no associated FinalGrade<|endoftext|> |
78c17723230caaf60e50e3e7848a6836524db866827af15ebcd1739cf539010e | def test_get_required_final_grade(self):
"\n Test that get_required_final_grade returns the FinalGrade associated with a user's course run\n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_required_final_grade(final_grade.course_run.edx_course_key) == final_grade) | Test that get_required_final_grade returns the FinalGrade associated with a user's course run | dashboard/utils_test.py | test_get_required_final_grade | mitodl/micromasters | 32 | python | def test_get_required_final_grade(self):
"\n \n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_required_final_grade(final_grade.course_run.edx_course_key) == final_grade) | def test_get_required_final_grade(self):
"\n \n "
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.cruns[0])
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_required_final_grade(final_grade.course_run.edx_course_key) == final_grade)<|docstring|>Test that get_required_final_grade returns the FinalGrade associated with a user's course run<|endoftext|> |
8c678c1372d8e4e1284334b2a121f99300cd3e5aed558ad7595647400a9545f4 | def test_get_required_final_grade_raises(self):
'\n Test for get_required_final_grade raises an exception if there is no associated FinalGrade\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
with self.assertRaises(FinalGrade.DoesNotExist):
mmtrack.get_required_final_grade('random-course-id') | Test for get_required_final_grade raises an exception if there is no associated FinalGrade | dashboard/utils_test.py | test_get_required_final_grade_raises | mitodl/micromasters | 32 | python | def test_get_required_final_grade_raises(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
with self.assertRaises(FinalGrade.DoesNotExist):
mmtrack.get_required_final_grade('random-course-id') | def test_get_required_final_grade_raises(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
with self.assertRaises(FinalGrade.DoesNotExist):
mmtrack.get_required_final_grade('random-course-id')<|docstring|>Test for get_required_final_grade raises an exception if there is no associated FinalGrade<|endoftext|> |