index | package | name | docstring | code | signature
---|---|---|---|---|---|
4,035 | springserve._demand | _ProgrammaticGuaranteedAPI | null | class _ProgrammaticGuaranteedAPI(_VDAPIService):
__RESPONSE_OBJECT__ = _DemandTagResponse
__API__ = "programmatic_guaranteed"
| () |
4,048 | springserve._common | _PublisherIdListAPI | null | class _PublisherIdListAPI(_VDAPIService):
__API__ = "publisher_id_lists"
__RESPONSE_OBJECT__ = _PublisherIdListResponse
| () |
4,061 | springserve._reporting | _ReportingAPI | null | class _ReportingAPI(_VDAPIService):
__API__ = "report"
__RESPONSES_OBJECT__ = _ReportingResponse
INTERVALS = ("hour", "day", "cumulative")
def _format_date(self, date):
if isinstance(date, datetime):
return date.strftime("%Y-%m-%d")
return date
def _get_report(self, payload):
response = self.post(data=payload)
#something bad happened
if not response.ok:
return response
if 'report_id' not in response.raw:
raise Exception('report_id field not in response: {}'.format(response.raw))
self._report_id = response.raw['report_id']
payload['report_id'] = self._report_id
if 'status' not in response.raw:
raise Exception('status field not in response: {}'.format(response.raw))
# poll the api until we get a completed report
while response.raw['status'] != 'COMPLETE':
sleep(1)
response = self.post(data=payload)
return response
def build_response(self, api_response, path_params, query_params, payload):
is_ok = api_response.ok
if not is_ok and api_response.status_code == 401:
raise VDAuthError("Need to Re-Auth")
if api_response.status_code == 204: # this means empty
resp_json = {}
else:
resp_json = api_response.json
return self.__RESPONSES_OBJECT__(self, resp_json, path_params, query_params, is_ok, payload)
def run(self, start_date=None, end_date=None, interval=None, dimensions=None,
account_id=None, **kwargs):
"""
parameter options (if applicable) notes
===================================================
start_date: "2015-12-01 00:00:00" or "2015-12-01"
end_date: "2015-12-02 00:00:00" or "2015-12-01"
interval: "hour", "day", "cumulative"
timezone: "UTC", "America/New_York" (defaults to America/New_York)
date_range: Today, Yesterday, Last 7 Days (date_range takes precedence over start_date/end_date)
dimensions: supply_tag_id, demand_tag_id, detected_domain, declared_domain, demand_type, supply_type, supply_partner_id, demand_partner_id, supply_group (note: domain is only available when using a date_range of Today, Yesterday, or Last 7 Days)
the following parameters act as filters; pass an array of values (usually IDs)
=================================================================================
supply_tag_ids: [22423,22375, 25463]
demand_tag_ids: [22423,22375, 25463]
detected_domains: ["nytimes.com", "weather.com"]
declared_domains: ["nytimes.com", "weather.com"]
supply_types: ["Syndicated","Third-Party"]
supply_partner_ids: [30,42,41]
supply_group_ids: [13,15,81]
demand_partner_ids: [3,10,81]
demand_types: ["Vast Only","FLASH"]
"""
self.payload = {
'start_date': self._format_date(start_date),
'end_date': self._format_date(end_date),
'report_service': True,
'async': True
}
if interval:
if interval not in self.INTERVALS:
raise Exception("not a valid interval")
self.payload['interval'] = interval
if dimensions:
self.payload['dimensions'] = dimensions
if account_id:
self.payload['account_id'] = account_id
if kwargs:
self.payload.update(kwargs)
return self._get_report(self.payload)
| () |
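A minimal usage sketch for the reporting service above; the accessor name springserve.reports and the tag IDs are assumptions, not taken from this listing. Filter parameters ride through **kwargs into the payload, and run() polls the API until the report status is COMPLETE:

import springserve

resp = springserve.reports.run(           # hypothetical accessor name
    start_date="2015-12-01",
    end_date="2015-12-02",
    interval="day",
    dimensions=["supply_tag_id"],
    supply_tag_ids=[22423, 22375],        # hypothetical filter values, passed through **kwargs
)
print(resp.ok, resp.raw)                  # raw holds the parsed JSON body of the final poll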
4,063 | springserve._reporting | _format_date | null | def _format_date(self, date):
if isinstance(date, datetime):
return date.strftime("%Y-%m-%d")
return date
| (self, date) |
4,064 | springserve._reporting | _get_report | null | def _get_report(self, payload):
response = self.post(data=payload)
#something bad happened
if not response.ok:
return response
if 'report_id' not in response.raw:
raise Exception('report_id field not in response: {}'.format(response.raw))
self._report_id = response.raw['report_id']
payload['report_id'] = self._report_id
if 'status' not in response.raw:
raise Exception('status field not in response: {}'.format(response.raw))
# poll the api until we get a completed report
while response.raw['status'] != 'COMPLETE':
sleep(1)
response = self.post(data=payload)
return response
| (self, payload) |
4,068 | springserve._reporting | build_response | null | def build_response(self, api_response, path_params, query_params, payload):
is_ok = api_response.ok
if not is_ok and api_response.status_code == 401:
raise VDAuthError("Need to Re-Auth")
if api_response.status_code == 204: # this means empty
resp_json = {}
else:
resp_json = api_response.json
return self.__RESPONSES_OBJECT__(self, resp_json, path_params, query_params, is_ok, payload)
| (self, api_response, path_params, query_params, payload) |
4,076 | springserve._reporting | run |
parameter options (if applicable) notes
===================================================
start_date: "2015-12-01 00:00:00" or "2015-12-01"
end_date: "2015-12-02 00:00:00" or "2015-12-01"
interval: "hour", "day", "cumulative"
timezone: "UTC", "America/New_York" (defaults to America/New_York)
date_range: Today, Yesterday, Last 7 Days (date_range takes precedence over start_date/end_date)
dimensions: supply_tag_id, demand_tag_id, detected_domain, declared_domain, demand_type, supply_type, supply_partner_id, demand_partner_id, supply_group (note: domain is only available when using a date_range of Today, Yesterday, or Last 7 Days)
the following parameters act as filters; pass an array of values (usually IDs)
=================================================================================
supply_tag_ids: [22423,22375, 25463]
demand_tag_ids: [22423,22375, 25463]
detected_domains: ["nytimes.com", "weather.com"]
declared_domains: ["nytimes.com", "weather.com"]
supply_types: ["Syndicated","Third-Party"]
supply_partner_ids: [30,42,41]
supply_group_ids: [13,15,81]
demand_partner_ids: [3,10,81]
demand_types: ["Vast Only","FLASH"]
| def run(self, start_date=None, end_date=None, interval=None, dimensions=None,
account_id=None, **kwargs):
"""
parameter options (if applicable) notes
===================================================
start_date: "2015-12-01 00:00:00" or "2015-12-01"
end_date: "2015-12-02 00:00:00" or "2015-12-01"
interval: "hour", "day", "cumulative"
timezone: "UTC", "America/New_York" (defaults to America/New_York)
date_range: Today, Yesterday, Last 7 Days (date_range takes precedence over start_date/end_date)
dimensions: supply_tag_id, demand_tag_id, detected_domain, declared_domain, demand_type, supply_type, supply_partner_id, demand_partner_id, supply_group (note: domain is only available when using a date_range of Today, Yesterday, or Last 7 Days)
the following parameters act as filters; pass an array of values (usually IDs)
=================================================================================
supply_tag_ids: [22423,22375, 25463]
demand_tag_ids: [22423,22375, 25463]
detected_domains: ["nytimes.com", "weather.com"]
declared_domains: ["nytimes.com", "weather.com"]
supply_types: ["Syndicated","Third-Party"]
supply_partner_ids: [30,42,41]
supply_group_ids: [13,15,81]
demand_partner_ids: [3,10,81]
demand_types: ["Vast Only","FLASH"]
"""
self.payload = {
'start_date': self._format_date(start_date),
'end_date': self._format_date(end_date),
'report_service': True,
'async': True
}
if interval:
if interval not in self.INTERVALS:
raise Exception("not a valid interval")
self.payload['interval'] = interval
if dimensions:
self.payload['dimensions'] = dimensions
if account_id:
self.payload['account_id'] = account_id
if kwargs:
self.payload.update(kwargs)
return self._get_report(self.payload)
| (self, start_date=None, end_date=None, interval=None, dimensions=None, account_id=None, **kwargs) |
4,077 | springserve._common | _SegmentListAPI | null | class _SegmentListAPI(_VDAPIService):
__API__ = "segments"
__RESPONSE_OBJECT__ = _SegmentListResponse
| () |
4,090 | springserve._demand | _SpotxConnectAPI | null | class _SpotxConnectAPI(_VDAPIService):
__RESPONSE_OBJECT__ = _DemandTagResponse
__API__ = "spotx_connects"
| () |
4,103 | springserve._supply | _SupplyLabelAPI | null | class _SupplyLabelAPI(_VDAPIService):
__API__ = "supply_labels"
| () |
4,116 | springserve._supply | _SupplyPartnerAPI | null | class _SupplyPartnerAPI(_VDAPIService):
__API__ = "supply_partners"
| () |
4,129 | springserve._supply | _SupplyRouterAPI | null | class _SupplyRouterAPI(_VDAPIService):
__API__ = "supply_routers"
| () |
4,142 | springserve._supply | _SupplyTagAPI | null | class _SupplyTagAPI(_VDAPIService):
__RESPONSE_OBJECT__ = _VDDuplicateableResponse
__API__ = "supply_tags"
| () |
4,155 | springserve | _TabComplete |
this class exists to make any other class
have a tab completion function that is already
hooked into ipython
| class _TabComplete(object):
"""
this class exists to make any other class
have a tab completion function that is already
hooked into ipython
"""
def _tab_completions(self):
return []
def __dir__(self):
return super(_TabComplete, self).__dir__() + self._tab_completions()
| () |
4,156 | springserve | __dir__ | null | def __dir__(self):
return super(_TabComplete, self).__dir__() + self._tab_completions()
| (self) |
4,157 | springserve | _tab_completions | null | def _tab_completions(self):
return []
| (self) |
4,158 | springserve._demand | _TagCreativeAPI | null | class _TagCreativeAPI(_VDAPIService):
__API__ = "tag_creatives"
| () |
4,171 | springserve._reporting | _TrafficQualityReport | null | class _TrafficQualityReport(_ReportingAPI):
__API__ = "traffic_quality_reports"
__RESPONSES_OBJECT__ = _ReportingResponse
| () |
4,187 | springserve._account | _UserAPI | null | class _UserAPI(_VDAPIService):
__API__ = "users"
| () |
4,200 | springserve | _VDAPIMultiResponse | null | class _VDAPIMultiResponse(_VDAPIResponse):
def __init__(self, service, api_response_data, path_params, query_params,
response_object, ok, payload='', injected_account_id=None):
super(_VDAPIMultiResponse, self).__init__(service, api_response_data,
path_params, query_params, ok, payload)
self._payload = payload
self._object_cache = []
self._current_page = 1
self._all_pages_gotten = False
self._injected_account_id = injected_account_id
self.response_object = response_object
# build out the initial set of objects
self._build_cache(self.raw)
def _build_cache(self, objects):
self._object_cache.extend([self._build_response_object(x) for x in
objects])
def _is_last_page(self, resp):
return (not resp or not resp.json)
def _get_next_page(self):
if self._all_pages_gotten:
return
params = self._query_params.copy()
params['page'] = self._current_page+1
resp = self._service.get_raw(self._path_params, **params)
# this means we are donesky, we don't know
# how many items there will be, only that we hit the last page
if self._is_last_page(resp):
self._all_pages_gotten = True
return
self._build_cache(resp.json)
self._current_page += 1
def _build_response_object(self, data):
return self.response_object(self._service, data,
self._path_params,
self._query_params,
True,
payload='',
injected_account_id=self._injected_account_id)
def __getitem__(self, key):
if not isinstance(key, int):
raise Exception("Must be an index ")
if key >= len(self._object_cache):
if self._all_pages_gotten:
raise IndexError("All pages gotten, no such object")
self._get_next_page()
return self[key]
return self._object_cache[key]
def __iter__(self):
"""
this will automatically take care of pagination for us.
"""
idx = 0
while True:
# not sure I love this method, but it's the best
# one I can think of right now
try:
yield self[idx]
idx += 1
except IndexError as e:
break
def __len__(self):
return len([x for x in self])
| (service, api_response_data, path_params, query_params, response_object, ok, payload='', injected_account_id=None) |
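Because __getitem__ fetches further pages on demand and __iter__ simply indexes until it hits IndexError, iterating a multi-response walks every page transparently. A short sketch, assuming the supply_tags service shown in the docstrings elsewhere in this listing:

import springserve

tags = springserve.supply_tags.get()   # returns a _VDAPIMultiResponse
first = tags[0]                        # served from the cached first page
for tag in tags:                       # later pages are fetched lazily as indexing outgrows the cache
    print(tag.id, tag.name)
print(len(tags))                       # note: __len__ iterates everything, forcing all pages to load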
4,202 | springserve | __getattr__ |
This is where the magic happens that allows you to treat this as an
object that has all of the fields that the api returns. All of the
returned data is kept in self._raw_response
| def __getattr__(self, key):
"""
This is where the magic happens that allows you to treat this as an
object that has all of the fields that the api returns. All of the
returned data is kept in self._raw_response
"""
# if it's not there then try to get it as an attribute
try:
return self.__getattribute__(key)
except AttributeError as e:
# makes unpickling work?
if key.startswith("__"):
raise e
return self._raw_response[key]
| (self, key) |
4,203 | springserve | __getitem__ | null | def __getitem__(self, key):
if not isinstance(key, int):
raise Exception("Must be an index ")
if key >= len(self._object_cache):
if self._all_pages_gotten:
raise IndexError("All pages gotten, no such object")
self._get_next_page()
return self[key]
return self._object_cache[key]
| (self, key) |
4,204 | springserve | __init__ | null | def __init__(self, service, api_response_data, path_params, query_params,
response_object, ok, payload='', injected_account_id=None):
super(_VDAPIMultiResponse, self).__init__(service, api_response_data,
path_params, query_params, ok, payload)
self._payload = payload
self._object_cache = []
self._current_page = 1
self._all_pages_gotten = False
self._injected_account_id = injected_account_id
self.response_object = response_object
# build out the initial set of objects
self._build_cache(self.raw)
| (self, service, api_response_data, path_params, query_params, response_object, ok, payload='', injected_account_id=None) |
4,205 | springserve | __iter__ |
this will automatically take care of pagination for us.
| def __iter__(self):
"""
this will automatically take care of pagination for us.
"""
idx = 0
while True:
# not sure I love this method, but it's the best
# one I can think of right now
try:
yield self[idx]
idx += 1
except IndexError as e:
break
| (self) |
4,206 | springserve | __len__ | null | def __len__(self):
return len([x for x in self])
| (self) |
4,207 | springserve | _build_cache | null | def _build_cache(self, objects):
self._object_cache.extend([self._build_response_object(x) for x in
objects])
| (self, objects) |
4,208 | springserve | _build_response_object | null | def _build_response_object(self, data):
return self.response_object(self._service, data,
self._path_params,
self._query_params,
True,
payload='',
injected_account_id=self._injected_account_id)
| (self, data) |
4,209 | springserve | _get_next_page | null | def _get_next_page(self):
if self._all_pages_gotten:
return
params = self._query_params.copy()
params['page'] = self._current_page+1
resp = self._service.get_raw(self._path_params, **params)
# this means we are donesky, we don't know
# how many items there will be, only that we hit the last page
if self._is_last_page(resp):
self._all_pages_gotten = True
return
self._build_cache(resp.json)
self._current_page += 1
| (self) |
4,210 | springserve | _is_last_page | null | def _is_last_page(self, resp):
return (not resp or not resp.json)
| (self, resp) |
4,211 | springserve | _tab_completions | null | def _tab_completions(self):
if not self.raw:
return []
return list(self.raw.keys())
| (self) |
4,212 | springserve | _VDAPIResponse | null | class _VDAPIResponse(_TabComplete):
def __init__(self, service, api_response_data, path_params, query_params,
ok, payload='', injected_account_id=None):
super(_VDAPIResponse, self).__init__()
self._service = service
self._raw_response = api_response_data
self._path_params = path_params
self._query_params = query_params or {}
self._ok = ok
self._payload = payload
self._injected_account_id = injected_account_id
@property
def ok(self):
"""
Tells you if the api response was "ok"
meaning that it responded with a 200. If there was an error, this will
return false
"""
return self._ok
@property
def raw(self):
"""
Gives you the raw json response from the api. Usually you do not need
to call this
"""
return self._raw_response
def __getitem__(self, key):
if isinstance(key, str):
return self.raw[key]
elif isinstance(key, int):
return self.raw[key]
def __getattr__(self, key):
"""
This is where the magic happens that allows you to treat this as an
object that has all of the fields that the api returns. All of the
returned data is kept in self._raw_response
"""
# if it's not there then try to get it as an attribute
try:
return self.__getattribute__(key)
except AttributeError as e:
# makes unpickling work?
if key.startswith("__"):
raise e
return self._raw_response[key]
def _tab_completions(self):
if not self.raw:
return []
return list(self.raw.keys())
| (service, api_response_data, path_params, query_params, ok, payload='', injected_account_id=None) |
4,215 | springserve | __getitem__ | null | def __getitem__(self, key):
if isinstance(key, str):
return self.raw[key]
elif isinstance(key, int):
return self.raw[key]
| (self, key) |
4,216 | springserve | __init__ | null | def __init__(self, service, api_response_data, path_params, query_params,
ok, payload='', injected_account_id=None):
super(_VDAPIResponse, self).__init__()
self._service = service
self._raw_response = api_response_data
self._path_params = path_params
self._query_params = query_params or {}
self._ok = ok
self._payload = payload
self._injected_account_id = injected_account_id
| (self, service, api_response_data, path_params, query_params, ok, payload='', injected_account_id=None) |
4,218 | springserve | _VDAPIService | null | class _VDAPIService(object):
__API__ = None
__RESPONSE_OBJECT__ = _VDAPISingleResponse
__RESPONSES_OBJECT__ = _VDAPIMultiResponse
def __init__(self):
self.account_id = None
@property
def endpoint(self):
"""
The api endpoint that is used for this service. For example::
In [1]: import springserve
In [2]: springserve.supply_tags.endpoint
Out[2]: '/supply_tags'
"""
return "/" + self.__API__
def build_response(self, api_response, path_params, query_params, payload=''):
is_ok = api_response.ok
if not is_ok and api_response.status_code == 401:
raise VDAuthError("Need to Re-Auth")
if api_response.status_code == 204: # this means empty
resp_json = {}
else:
try:
resp_json = api_response.json
except:
resp_json = {"error": "error parsing json response"}
if isinstance(resp_json, list):
# wrap it in a multi container
return self.__RESPONSES_OBJECT__(self, resp_json, path_params,
query_params, self.__RESPONSE_OBJECT__,
is_ok, payload, self.account_id)
return self.__RESPONSE_OBJECT__(self, resp_json, path_params,
query_params, is_ok, payload,
self.account_id)
@raw_response_retry
def get_raw(self, path_param=None, reauth=False, **query_params):
"""
Get the raw http response for this object. This is rarely used by a
client unless they want to inspect the raw http fields
"""
params = _format_params(query_params)
return API(reauth=reauth).get(_format_url(self.endpoint, path_param),
params=params)
def get(self, path_param=None, reauth=False, **query_params):
"""
Make a get request to this api service. Allows you to pass in arbitrary
query paramaters.
Examples::
# get all supply_tags
tags = springserve.supply_tags.get()
for tag in tags:
print(tag.id, tag.name)
# get one supply tag
tag = springserve.supply_tag.get(1)
print(tag.id, tag.name)
# get by many ids
tags = springserve.supply_tags.get(ids=[1,2,3])
# get users that are account_contacts (ie, using query string params)
users = springserve.users.get(account_contact=True)
"""
global API
try:
return self.build_response(
self.get_raw(path_param, reauth=reauth, **query_params),
path_param,
query_params
)
except VDAuthError as e:
# we only retry if we are redo'n on an auto reauth
if not reauth:
_msg.info("Reauthing and then retry")
return self.get(path_param, reauth=True, **query_params)
raise e
@raw_response_retry
def _put_raw(self, path_param, data, reauth=False, **query_params):
params = _format_params(query_params)
return API(reauth=reauth).put(
_format_url(self.endpoint, path_param),
params=params,
data=_json.dumps(data)
)
def put(self, path_param, data, reauth=False, **query_params):
global API
try:
return self.build_response(
self._put_raw(path_param, data, reauth=reauth, **query_params),
path_param,
query_params,
payload=data
)
except VDAuthError as e:
# we only retry if we are redo'n on an auto reauth
if not reauth:
_msg.info("Reauthing and then retry")
return self.put(path_param, data, reauth=True, **query_params)
raise e
@raw_response_retry
def _post_raw(self, data, path_param="", reauth=False, files=None, **query_params):
params = _format_params(query_params)
if not files:
return API(reauth=reauth).post(
_format_url(self.endpoint, path_param),
params=params,
data=_json.dumps(data)
)
m = MultipartEncoder(
fields=files
)
return API(reauth=reauth).post(
_format_url(self.endpoint, path_param),
headers={'Content-Type': m.content_type},
params=params,
data=m
)
def post(self, data, path_param="", files=None, reauth=False, **query_params):
global API
try:
return self.build_response(
self._post_raw(data, path_param, reauth=reauth, files=files, **query_params),
path_param,
query_params,
payload=data
)
except VDAuthError as e:
# we only retry if we are redo'n on an auto reauth
if not reauth:
_msg.info("Reauthing and then retry")
return self.post(data, path_param, reauth=True, files=files, **query_params)
# means that we had already tried a reauth and it failed
raise e
def delete(self, path_param="", reauth=False, **query_params):
global API
try:
params = _format_params(query_params)
return self.build_response(
API(reauth=reauth).delete(
_format_url(self.endpoint, path_param),
params=params,
),
path_param,
query_params
)
except VDAuthError as e:
# we only retry if we are redo'n on an auto reauth
if not reauth:
_msg.info("Reauthing and then retry")
return self.delete(path_param, reauth=True, **query_params)
# means that we had already tried a reauth and it failed
raise e
def _raw_bulk_delete(self, data, path_param="", reauth=False, files=None, **query_params):
params = _format_params(query_params)
if not files:
return API(reauth=reauth).delete(
_format_url(self.endpoint, path_param),
params=params,
data=_json.dumps(data)
)
m = MultipartEncoder(
fields=files
)
return API(reauth=reauth).delete(
_format_url(self.endpoint, path_param),
params=params,
headers={'Content-Type': m.content_type},
data=m
)
def bulk_delete(self, data, path_param="", reauth=False, files=None, **query_params):
"""
Delete an object.
"""
global API
try:
return self.build_response(
self._raw_bulk_delete(data, path_param=path_param,
reauth=reauth, files=files,
**query_params),
path_param,
query_params
)
except VDAuthError as e:
# we only retry if we are redo'n on an auto reauth
if not reauth:
_msg.info("Reauthing and then retry")
return self.bulk_delete(data, path_param, reauth=True,
files=files, **query_params)
# means that we had already tried a reauth and it failed
raise e
def new(self, data, path_param="", reauth=False, **query_params):
"""
Create a new object. You need to pass in the required fields as a
dictionary. For instance::
resp = springserve.domain_lists.new({'name':'My Domain List'})
print(resp.ok)
"""
return self.post(data, path_param, reauth=reauth, **query_params)
| () |
4,231 | springserve | _VDAPISingleResponse | null | class _VDAPISingleResponse(_VDAPIResponse):
def __init__(self, service, api_response_data, path_params, query_params,
ok, payload='', injected_account_id=None):
self._dirty = {}
super(_VDAPISingleResponse, self).__init__(service, api_response_data,
path_params, query_params,
ok, payload,
injected_account_id)
def set_dirty(self, field):
"""
you need this for nested fields that you have changed
but didn't actually set
"""
self._dirty[field] = self._raw_response[field]
def save(self, dirty_only=False, **kwargs):
"""
Save this object back to the api after making changes. As an example::
tag = springserve.supply_tags.get(1)
tag.name = "This is my new name"
# this will print if the save went through correctly
print(tag.save().ok)
Returns:
An API response object
"""
# if they have dirty fields only send those
payload = self.raw
if dirty_only:
payload = self._dirty
try:
account_id = self.account_id
except Exception as e:
if self._injected_account_id:
account_id = self._injected_account_id
else:
raise e
return self._service.put(self.id, payload, account_id=account_id, **kwargs)
def duplicate(self, **kwargs):
payload = self.raw.copy()
payload.update(kwargs)
return self._service.new(payload, account_id=self.account_id)
def __setattr__(self, attr, value):
"""
If it's a property that was already defined when the class
was initialized then let it exist. If it's new then store it in
_raw_response. This allows us to set new attributes and save them back to the api
"""
# allows you to add any private field in the init
# I could do something else here but I think it makes
# sense to enforce private variables in the ConsoleObject
if attr.startswith('_'):
self.__dict__[attr] = value
if attr in self.__dict__:
self.__dict__[attr] = value
else:
# TODO - this is the only place where appnexus object fields get changed?
self._raw_response[attr] = value
self._dirty[attr] = value
| (service, api_response_data, path_params, query_params, ok, payload='', injected_account_id=None) |
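Attribute assignments on a single response are mirrored into _dirty by __setattr__, so save(dirty_only=True) PUTs only the changed fields rather than the full raw payload. A sketch following the docstring's own example:

import springserve

tag = springserve.supply_tags.get(1)
tag.name = "This is my new name"     # recorded in _dirty as well as _raw_response
resp = tag.save(dirty_only=True)     # sends just {'name': ...} back to the api
print(resp.ok)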
4,235 | springserve | __init__ | null | def __init__(self, service, api_response_data, path_params, query_params,
ok, payload='', injected_account_id=None):
self._dirty = {}
super(_VDAPISingleResponse, self).__init__(service, api_response_data,
path_params, query_params,
ok, payload,
injected_account_id)
| (self, service, api_response_data, path_params, query_params, ok, payload='', injected_account_id=None) |
4,236 | springserve | __setattr__ |
If it's a property that was already defined when the class
was initialized then let it exist. If it's new then store it in
_raw_response. This allows us to set new attributes and save them back to the api
| def __setattr__(self, attr, value):
"""
If it's a property that was already defined when the class
was initialized then let it exist. If it's new then store it in
_raw_response. This allows us to set new attributes and save them back to the api
"""
# allows you to add any private field in the init
# I could do something else here but I think it makes
# sense to enforce private variables in the ConsoleObject
if attr.startswith('_'):
self.__dict__[attr] = value
if attr in self.__dict__:
self.__dict__[attr] = value
else:
# TODO - this is the only place where appnexus object fields get changed?
self._raw_response[attr] = value
self._dirty[attr] = value
| (self, attr, value) |
4,238 | springserve | duplicate | null | def duplicate(self, **kwargs):
payload = self.raw.copy()
payload.update(kwargs)
return self._service.new(payload, account_id=self.account_id)
| (self, **kwargs) |
4,239 | springserve | save |
Save this object back to the api after making changes. As an example::
tag = springserve.supply_tags.get(1)
tag.name = "This is my new name"
# this will print if the save went through correctly
print(tag.save().ok)
Returns:
An API response object
| def save(self, dirty_only=False, **kwargs):
"""
Save this object back to the api after making changes. As an example::
tag = springserve.supply_tags.get(1)
tag.name = "This is my new name"
# this will print if the save went through correctly
print(tag.save().ok)
Returns:
An API response object
"""
# if they have dirty fields only send those
payload = self.raw
if dirty_only:
payload = self._dirty
try:
account_id = self.account_id
except Exception as e:
if self._injected_account_id:
account_id = self._injected_account_id
else:
raise e
return self._service.put(self.id, payload, account_id=account_id, **kwargs)
| (self, dirty_only=False, **kwargs) |
4,240 | springserve | set_dirty |
you need this for nested fields that you have changed
but didn't actually set
| def set_dirty(self, field):
"""
you need this for nested fields that you have changed
but didn't actually set
"""
self._dirty[field] = self._raw_response[field]
| (self, field) |
4,241 | springserve | _VDDuplicateableResponse | null | class _VDDuplicateableResponse(_VDAPISingleResponse):
def duplicate(self, **kwargs):
return self._service.get("{}/duplicate".format(self.id), account_id=self.account_id)
| (service, api_response_data, path_params, query_params, ok, payload='', injected_account_id=None) |
4,248 | springserve | duplicate | null | def duplicate(self, **kwargs):
return self._service.get("{}/duplicate".format(self.id), account_id=self.account_id)
| (self, **kwargs) |
4,251 | springserve._demand | _VideoCreativeAPI | null | class _VideoCreativeAPI(_VDAPIService):
__API__ = "videos"
def upload_video(self, payload, video_file, **kwargs):
files={'video': (video_file.split('/')[-1], open(video_file, 'rb'),
"multipart/form-data")}
if payload:
files.update({x:str(y) for x,y in payload.items()})
return self.post(payload, files=files, **kwargs)
| () |
4,264 | springserve._demand | upload_video | null | def upload_video(self, payload, video_file, **kwargs):
files={'video': (video_file.split('/')[-1], open(video_file, 'rb'),
"multipart/form-data")}
if payload:
files.update({x:str(y) for x,y in payload.items()})
return self.post(payload, files=files, **kwargs)
| (self, payload, video_file, **kwargs) |
4,270 | springserve | _format_params | null | def _format_params(params):
_params = {}
for key, value in params.items():
if isinstance(value, list):
# make sure any list has the [] on it
key = "{}[]".format(key.lstrip("[]"))
_params[key] = value
return _params
| (params) |
4,271 | springserve | _format_url | null | def _format_url(endpoint, path_param):
_url = endpoint
if path_param:
_url += "/{}".format(path_param)
return _url
| (endpoint, path_param) |
4,272 | springserve | _install_ipython_completers | null | def _install_ipython_completers(): # pragma: no cover
from IPython.utils.generics import complete_object
@complete_object.when_type(_TabComplete)
def complete_report_object(obj, prev_completions):
"""
Add in all the methods of the _wrapped object so it's
visible in iPython as well
"""
prev_completions += obj._tab_completions()
return prev_completions
| () |
4,278 | builtins | object | The base class of the class hierarchy.
When called, it accepts no arguments and returns a new featureless
instance that has no instance attributes and cannot be given any.
| from builtins import object
| () |
4,279 | springserve | raw_get | null | def raw_get(path_param, **query_params):
global API
params = _format_params(query_params)
return API().get(_format_url("", path_param), params=params).json
| (path_param, **query_params) |
4,280 | springserve._decorators | raw_response_retry |
Decorator for SpringServe API to handle retries (with exponential backoff) in the case
of a rate-limit or 5XX error.
Sleep duration and backoff factor control wait time between successive failures, e.g.
sleep_duration 3 and backoff_factor 2 means sleep 3s, 6s, 12s, 24s
:param int limit: Max number of retry attempts
:param int sleep_duration: Initial sleep time
:param float/int backoff_factor: Factor to increase sleep between successive retries.
| def raw_response_retry(api_call, limit=4, sleep_duration=5, backoff_factor=2):
"""
Decorator for SpringServe API to handle retries (with exponential backoff) in the case
of a rate-limit or 5XX error.
Sleep duration and backoff factor control wait time between successive failures, e.g.
sleep_duration 3 and backoff_factor 2 means sleep 3s, 6s, 12s, 24s
:param int limit: Max number of retry attempts
:param int sleep_duration: Initial sleep time
:param float/int backoff_factor: Factor to increase sleep between successive retries.
"""
def wrapped(*args, **kwargs):
sleeps = sleep_duration
num_attempts = 0
while num_attempts < limit:
# make the API call
resp = api_call(*args, **kwargs)
aws_check = (
# make sure it's the link response object
isinstance(resp, SpringServeAPIResponseWrapper) and
# HTTP status codes that are valid for retries
resp.status_code >= 500 and resp.status_code < 600 and
# content matches one of our error messages - note that ELB error
# messages will not be JSON (they are HTML strings) so cannot check
# resp.json attribute, as this will not always be valid
is_resp_in_elb_error_messages(resp)
)
rack_attack_check = (
isinstance(resp, SpringServeAPIResponseWrapper) and
resp.status_code == RACK_ATTACK_STATUS_CODE and
resp.content == RACK_ATTACK_MESSAGE
)
if aws_check or rack_attack_check:
_msg.warn("Encountered rate-limit (attempt {}), sleeping".format(num_attempts))
num_attempts += 1
time.sleep(sleeps)
sleeps *= backoff_factor
# call was either successful, or an error outside of the purview of this
# handler
else:
return resp
# We've hit max retry attempts, return anyways
return resp
return wrapped
| (api_call, limit=4, sleep_duration=5, backoff_factor=2) |
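The defaults above (limit=4, sleep_duration=5, backoff_factor=2) translate into a fixed backoff schedule; a small worked sketch of the loop's sleep arithmetic:

# backoff schedule implied by the defaults: four failed attempts sleep 5s, 10s, 20s, 40s
sleeps, schedule = 5, []
for attempt in range(4):
    schedule.append(sleeps)
    sleeps *= 2
print(schedule)   # [5, 10, 20, 40]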
4,281 | springserve | set_credentials | null | def set_credentials(user, password, base_url='https://console.springserve.com/api/v0'):
global _CONFIG_OVERRIDE
_CONFIG_OVERRIDE = {'user': user, 'password': password, 'base_url': base_url}
API(True)
| (user, password, base_url='https://console.springserve.com/api/v0') |
4,282 | springserve | set_token | null | def set_token(token, base_url='https://console.springserve.com/api/v0'):
global _TOKEN_OVERRIDE
_TOKEN_OVERRIDE = {'token': token, 'base_url': base_url}
API(True)
| (token, base_url='https://console.springserve.com/api/v0') |
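The two helpers above are the programmatic alternative to a config file: set_token and set_credentials stash an override and immediately re-authenticate via API(True). A hedged sketch using a placeholder token:

import springserve

springserve.set_token("MY_API_TOKEN")    # or: springserve.set_credentials(user, password)
tags = springserve.supply_tags.get()     # subsequent calls use the overridden credentials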
4,283 | springserve | setup_config |
This is used the first time you run it to set up your configuration if
you want to do that over the command prompt
| def setup_config():
"""
This is used the first time you run it to set up your configuration if
you want to do that over the command prompt
"""
current_file = _lnk.config_file()
current_config = _lnk.config() or {}
if current_config.get('springserve'):
print("already configured, remove or edit springserve section from {}".format(current_file))
confirm = input('Would you like overwrite[Y/n] ')
if not confirm or confirm.upper() != 'Y':
print("thanks")
return
user = input('Enter a user name: ')
password = getpass.getpass('Enter password: ')
current_config['springserve'] = {
'__default__': {
'base_url': _DEFAULT_BASE_URL,
'user': user,
'password': password,
'wrapper': "SpringServeAPI"
}
}
confirm = input('Would you like write[Y/n] ')
if not confirm or confirm.upper() != 'Y':
print("thanks")
return
print("writing config to: {}".format(current_file))
with open(current_file, 'w') as f:
f.write(_json.dumps(current_config, indent=4))
print("done: refreshing config")
_lnk.fresh()
| () |
4,285 | springserve | switch_account | null | def switch_account(account_name="__default__"):
global _ACCOUNT
_ACCOUNT = account_name
API(True)
| (account_name='__default__') |
4,286 | csv | Dialect | Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
| class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
| () |
4,287 | csv | __init__ | null | def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
| (self) |
4,288 | csv | _validate | null | def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
| (self) |
4,289 | unicodecsv.py3 | DictReader | null | class DictReader(csv.DictReader):
def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
csv.DictReader.__init__(self, csvfile, fieldnames, restkey, restval,
dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
| (csvfile, fieldnames=None, restkey=None, restval=None, dialect='excel', encoding='utf-8', errors='strict', *args, **kwds) |
4,290 | unicodecsv.py3 | __init__ | null | def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
csv.DictReader.__init__(self, csvfile, fieldnames, restkey, restval,
dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
| (self, csvfile, fieldnames=None, restkey=None, restval=None, dialect='excel', encoding='utf-8', errors='strict', *args, **kwds) |
4,291 | csv | __iter__ | null | def __iter__(self):
return self
| (self) |
4,292 | csv | __next__ | null | def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
| (self) |
4,293 | unicodecsv.py3 | DictWriter | null | class DictWriter(csv.DictWriter):
def __init__(self, csvfile, fieldnames, restval='',
extrasaction='raise', dialect='excel', encoding='utf-8',
errors='strict', *args, **kwds):
super().__init__(csvfile, fieldnames, restval,
extrasaction, dialect, *args, **kwds)
self.writer = UnicodeWriter(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
self.encoding_errors = errors
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
| (csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds) |
4,294 | unicodecsv.py3 | __init__ | null | def __init__(self, csvfile, fieldnames, restval='',
extrasaction='raise', dialect='excel', encoding='utf-8',
errors='strict', *args, **kwds):
super().__init__(csvfile, fieldnames, restval,
extrasaction, dialect, *args, **kwds)
self.writer = UnicodeWriter(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
self.encoding_errors = errors
| (self, csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds) |
4,295 | csv | _dict_to_list | null | def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = rowdict.keys() - self.fieldnames
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
return (rowdict.get(key, self.restval) for key in self.fieldnames)
| (self, rowdict) |
4,296 | unicodecsv.py3 | writeheader | null | def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
| (self) |
4,297 | csv | writerow | null | def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
| (self, rowdict) |
4,298 | csv | writerows | null | def writerows(self, rowdicts):
return self.writer.writerows(map(self._dict_to_list, rowdicts))
| (self, rowdicts) |
4,299 | _csv | Error | null | from _csv import Error
| null |
4,300 | csv | Sniffer |
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
| class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
groupindex = regexp.groupindex
for m in matches:
n = groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, chunkLength
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(min(chunkLength * iteration, len(data)))
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
thisType = complex
try:
thisType(row[col])
except (ValueError, OverflowError):
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| () |
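A quick check of the Sniffer on a small sample, illustrating the delimiter-frequency and header-vote heuristics described in the docstrings (standard-library behaviour):

import csv

sample = "name;age;city\nalice;30;nyc\nbob;25;sf\n"
dialect = csv.Sniffer().sniff(sample)
print(dialect.delimiter)                   # ';' occurs a consistent twice per row
print(csv.Sniffer().has_header(sample))    # True: the 'age' column is numeric below the first row only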
4,301 | csv | __init__ | null | def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
| (self) |
4,302 | csv | _guess_delimiter |
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
| def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, chunkLength
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(min(chunkLength * iteration, len(data)))
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
| (self, data, delimiters) |
4,303 | csv | _guess_quote_and_delimiter |
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
| def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
groupindex = regexp.groupindex
for m in matches:
n = groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
| (self, data, delimiters) |
4,304 | csv | has_header | null | def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
thisType = complex
try:
thisType(row[col])
except (ValueError, OverflowError):
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| (self, sample) |
4,305 | csv | sniff |
Returns a dialect (or None) corresponding to the sample
| def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
| (self, sample, delimiters=None) |
4,306 | unicodecsv.py3 | UnicodeReader | null | class UnicodeReader(object):
def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
**kwds):
format_params = ['delimiter', 'doublequote', 'escapechar',
'lineterminator', 'quotechar', 'quoting',
'skipinitialspace']
if dialect is None:
if not any([kwd_name in format_params
for kwd_name in kwds.keys()]):
dialect = csv.excel
f = (bs.decode(encoding, errors=errors) for bs in f)
self.reader = csv.reader(f, dialect, **kwds)
def __next__(self):
return self.reader.__next__()
def __iter__(self):
return self
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
| (f, dialect=None, encoding='utf-8', errors='strict', **kwds) |
4,307 | unicodecsv.py3 | __init__ | null | def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
**kwds):
format_params = ['delimiter', 'doublequote', 'escapechar',
'lineterminator', 'quotechar', 'quoting',
'skipinitialspace']
if dialect is None:
if not any([kwd_name in format_params
for kwd_name in kwds.keys()]):
dialect = csv.excel
f = (bs.decode(encoding, errors=errors) for bs in f)
self.reader = csv.reader(f, dialect, **kwds)
| (self, f, dialect=None, encoding='utf-8', errors='strict', **kwds) |
4,309 | unicodecsv.py3 | __next__ | null | def __next__(self):
return self.reader.__next__()
| (self) |
4,310 | unicodecsv.py3 | UnicodeWriter | null | class UnicodeWriter(object):
def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
*args, **kwds):
if f is None:
raise TypeError
f = _UnicodeWriteWrapper(f, encoding=encoding, errors=errors)
self.writer = csv.writer(f, dialect, *args, **kwds)
def writerow(self, row):
return self.writer.writerow(row)
def writerows(self, rows):
return self.writer.writerows(rows)
@property
def dialect(self):
return self.writer.dialect
| (f, dialect=<class 'csv.excel'>, encoding='utf-8', errors='strict', *args, **kwds) |
4,311 | unicodecsv.py3 | __init__ | null | def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
*args, **kwds):
if f is None:
raise TypeError
f = _UnicodeWriteWrapper(f, encoding=encoding, errors=errors)
self.writer = csv.writer(f, dialect, *args, **kwds)
| (self, f, dialect=<class 'csv.excel'>, encoding='utf-8', errors='strict', *args, **kwds) |
4,312 | unicodecsv.py3 | writerow | null | def writerow(self, row):
return self.writer.writerow(row)
| (self, row) |
4,313 | unicodecsv.py3 | writerows | null | def writerows(self, rows):
return self.writer.writerows(rows)
| (self, rows) |
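A hedged sketch of the writer side; it assumes the class is exposed as unicodecsv.writer and that the target file is opened in binary mode.
    from io import BytesIO
    import unicodecsv
    buf = BytesIO()
    writer = unicodecsv.writer(buf, encoding="utf-8")
    writer.writerow(["name", "city"])
    writer.writerows([["Zoë", "Köln"], ["José", "São Paulo"]])
    print(buf.getvalue().decode("utf-8"))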
4,315 | csv | excel | Describe the usual properties of Excel-generated CSV files. | class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
| () |
4,318 | csv | excel_tab | Describe the usual properties of Excel-generated TAB-delimited files. | class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
| () |
4,327 | csv | unix_dialect | Describe the usual properties of Unix-generated CSV files. | class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
| () |
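For reference, these dialect classes are registered in the standard library under the names 'excel', 'excel-tab', and 'unix'; a short sketch:
    import csv, sys
    writer = csv.writer(sys.stdout, dialect="unix")  # QUOTE_ALL, '\n' line terminator
    writer.writerow(["a", "b,c"])                    # prints: "a","b,c"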
4,335 | mdit_py_toc.plugin | slugify |
Convert title text to id slug for link references
Args:
title: Text to convert into a slug
| def slugify(title: str) -> str:
"""
Convert title text to id slug for link references
Args:
title: Text to convert into a slug
"""
return re.sub(
r"[^\w\u4e00-\u9fff\- ]", "", title.strip().lower().replace(" ", "-")
)
| (title: str) -> str |
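A hedged sketch of the behaviour: lowercase, spaces become hyphens, and anything outside word characters, CJK, hyphens, and spaces is stripped. The import path follows the package shown in the record.
    from mdit_py_toc.plugin import slugify
    print(slugify("Hello, World!"))        # hello-world
    print(slugify("  Crème brûlée 101 "))  # crème-brûlée-101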
4,336 | openapi_spec_validator.validation.validators | OpenAPIV2SpecValidator | null | class OpenAPIV2SpecValidator(SpecValidator):
schema_validator = openapi_v2_schema_validator
keyword_validators = {
"__root__": keywords.RootValidator,
"components": keywords.ComponentsValidator,
"default": keywords.OpenAPIV30ValueValidator,
"operation": keywords.OperationValidator,
"parameter": keywords.OpenAPIV2ParameterValidator,
"parameters": keywords.ParametersValidator,
"paths": keywords.PathsValidator,
"path": keywords.PathValidator,
"response": keywords.OpenAPIV2ResponseValidator,
"responses": keywords.ResponsesValidator,
"schema": keywords.SchemaValidator,
"schemas": keywords.SchemasValidator,
}
root_keywords = ["paths", "components"]
| (schema: Union[Mapping[Hashable, Any], jsonschema_path.paths.SchemaPath], base_uri: str = '', spec_url: Optional[str] = None) -> None |
4,337 | openapi_spec_validator.validation.validators | __init__ | null | def __init__(
self,
schema: AnySchema,
base_uri: str = "",
spec_url: Optional[str] = None,
) -> None:
if spec_url is not None:
warnings.warn(
"spec_url parameter is deprecated. " "Use base_uri instead.",
DeprecationWarning,
)
base_uri = spec_url
self.base_uri = base_uri
if isinstance(schema, SchemaPath):
self.schema_path = schema
self.schema = schema.contents()
else:
self.schema = schema
self.schema_path = SchemaPath.from_dict(
self.schema,
base_uri=self.base_uri,
handlers=self.resolver_handlers,
)
self.keyword_validators_registry = KeywordValidatorRegistry(
self.keyword_validators
)
| (self, schema: Union[Mapping[Hashable, Any], jsonschema_path.paths.SchemaPath], base_uri: str = '', spec_url: Optional[str] = None) -> NoneType |
4,338 | openapi_spec_validator.validation.validators | is_valid | null | def is_valid(self) -> bool:
error = next(self.iter_errors(), None)
return error is None
| (self) -> bool |
4,340 | openapi_spec_validator.validation.validators | validate | null | def validate(self) -> None:
for err in self.iter_errors():
raise err
| (self) -> NoneType |
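A hedged sketch of the instance-level workflow these validator classes share; the spec here is assumed to be a plain dict (a SchemaPath also works per the signature above).
    from openapi_spec_validator.validation.validators import OpenAPIV30SpecValidator
    spec = {"openapi": "3.0.0", "info": {"title": "Minimal API", "version": "1.0.0"}, "paths": {}}
    validator = OpenAPIV30SpecValidator(spec)
    if not validator.is_valid():
        for error in validator.iter_errors():  # lazily yields validation errors
            print(error)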
4,341 | openapi_spec_validator.validation.validators | OpenAPIV30SpecValidator | null | class OpenAPIV30SpecValidator(SpecValidator):
schema_validator = openapi_v30_schema_validator
keyword_validators = {
"__root__": keywords.RootValidator,
"components": keywords.ComponentsValidator,
"content": keywords.ContentValidator,
"default": keywords.OpenAPIV30ValueValidator,
"mediaType": keywords.MediaTypeValidator,
"operation": keywords.OperationValidator,
"parameter": keywords.ParameterValidator,
"parameters": keywords.ParametersValidator,
"paths": keywords.PathsValidator,
"path": keywords.PathValidator,
"response": keywords.OpenAPIV3ResponseValidator,
"responses": keywords.ResponsesValidator,
"schema": keywords.SchemaValidator,
"schemas": keywords.SchemasValidator,
}
root_keywords = ["paths", "components"]
| (schema: Union[Mapping[Hashable, Any], jsonschema_path.paths.SchemaPath], base_uri: str = '', spec_url: Optional[str] = None) -> None |
4,346 | openapi_spec_validator.validation.validators | OpenAPIV31SpecValidator | null | class OpenAPIV31SpecValidator(SpecValidator):
schema_validator = openapi_v31_schema_validator
keyword_validators = {
"__root__": keywords.RootValidator,
"components": keywords.ComponentsValidator,
"content": keywords.ContentValidator,
"default": keywords.OpenAPIV31ValueValidator,
"mediaType": keywords.MediaTypeValidator,
"operation": keywords.OperationValidator,
"parameter": keywords.ParameterValidator,
"parameters": keywords.ParametersValidator,
"paths": keywords.PathsValidator,
"path": keywords.PathValidator,
"response": keywords.OpenAPIV3ResponseValidator,
"responses": keywords.ResponsesValidator,
"schema": keywords.SchemaValidator,
"schemas": keywords.SchemasValidator,
}
root_keywords = ["paths", "components"]
| (schema: Union[Mapping[Hashable, Any], jsonschema_path.paths.SchemaPath], base_uri: str = '', spec_url: Optional[str] = None) -> None |
4,359 | openapi_spec_validator.shortcuts | validate | null | def validate(
spec: Schema,
base_uri: str = "",
cls: Optional[SpecValidatorType] = None,
) -> None:
if cls is None:
cls = get_validator_cls(spec)
sp = SchemaPath.from_dict(spec, base_uri=base_uri)
v = cls(sp)
return v.validate()
| (spec: Mapping[Hashable, Any], base_uri: str = '', cls: Optional[Type[openapi_spec_validator.validation.validators.SpecValidator]] = None) -> NoneType |
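A hedged sketch of the shortcut; when cls is omitted it picks a validator class from the spec's version field. The top-level re-export is assumed to match recent releases.
    from openapi_spec_validator import validate
    spec = {"openapi": "3.1.0", "info": {"title": "Minimal API", "version": "1.0.0"}, "paths": {}}
    validate(spec)  # raises a validation error on an invalid document, returns None otherwise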
4,360 | openapi_spec_validator.shortcuts | validate_spec | null | def validate_spec(
spec: Schema,
base_uri: str = "",
validator: Optional[SupportsValidation] = None,
cls: Optional[SpecValidatorType] = None,
spec_url: Optional[str] = None,
) -> None:
warnings.warn(
"validate_spec shortcut is deprecated. Use validate instead.",
DeprecationWarning,
)
if validator is not None:
warnings.warn(
"validator parameter is deprecated. Use cls instead.",
DeprecationWarning,
)
return validator.validate(spec, base_uri=base_uri, spec_url=spec_url)
if cls is None:
cls = get_validator_cls(spec)
v = cls(spec)
return v.validate()
| (spec: Mapping[Hashable, Any], base_uri: str = '', validator: Optional[openapi_spec_validator.validation.protocols.SupportsValidation] = None, cls: Optional[Type[openapi_spec_validator.validation.validators.SpecValidator]] = None, spec_url: Optional[str] = None) -> NoneType |
4,361 | openapi_spec_validator.shortcuts | validate_spec_url | null | def validate_spec_url(
spec_url: str,
validator: Optional[SupportsValidation] = None,
cls: Optional[Type[SpecValidator]] = None,
) -> None:
warnings.warn(
"validate_spec_url shortcut is deprecated. Use validate_url instead.",
DeprecationWarning,
)
if validator is not None:
warnings.warn(
"validator parameter is deprecated. Use cls instead.",
DeprecationWarning,
)
spec = all_urls_handler(spec_url)
return validator.validate(spec, base_uri=spec_url)
return validate_url(spec_url, cls=cls)
| (spec_url: str, validator: Optional[openapi_spec_validator.validation.protocols.SupportsValidation] = None, cls: Optional[Type[openapi_spec_validator.validation.validators.SpecValidator]] = None) -> NoneType |
4,362 | openapi_spec_validator.shortcuts | validate_url | null | def validate_url(
spec_url: str,
cls: Optional[Type[SpecValidator]] = None,
) -> None:
spec = all_urls_handler(spec_url)
return validate(spec, base_uri=spec_url, cls=cls)
| (spec_url: str, cls: Optional[Type[openapi_spec_validator.validation.validators.SpecValidator]] = None) -> NoneType |
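A hedged one-liner for the URL variant; the URL is hypothetical and the top-level re-export is assumed.
    from openapi_spec_validator import validate_url
    validate_url("https://example.com/openapi.yaml")  # fetches the document, then delegates to validate()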
4,365 | dagster._core.libraries | DagsterLibraryRegistry | null | class DagsterLibraryRegistry:
_libraries: Dict[str, str] = {"dagster": __version__}
@classmethod
def register(cls, name: str, version: str):
check_dagster_package_version(name, version)
cls._libraries[name] = version
@classmethod
def get(cls) -> Mapping[str, str]:
return cls._libraries.copy()
| () |
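A hedged sketch of how integration packages use the registry at import time; the package name and version string here are illustrative only.
    from dagster._core.libraries import DagsterLibraryRegistry
    DagsterLibraryRegistry.register("dagster-snowflake", "1.2.3")  # hypothetical version
    print(DagsterLibraryRegistry.get())  # {'dagster': <core version>, 'dagster-snowflake': '1.2.3'}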
4,366 | dagster_snowflake.resources | SnowflakeConnection | A connection to Snowflake that can execute queries. In general this class should not be
directly instantiated, but rather used as a resource in an op or asset via the
:py:func:`snowflake_resource`.
Note that the SnowflakeConnection is only used by the snowflake_resource. The Pythonic SnowflakeResource does
not use this SnowflakeConnection class.
| class SnowflakeConnection:
"""A connection to Snowflake that can execute queries. In general this class should not be
directly instantiated, but rather used as a resource in an op or asset via the
:py:func:`snowflake_resource`.
Note that the SnowflakeConnection is only used by the snowflake_resource. The Pythonic SnowflakeResource does
not use this SnowflakeConnection class.
"""
def __init__(
self, config: Mapping[str, str], log, snowflake_connection_resource: SnowflakeResource
):
self.snowflake_connection_resource = snowflake_connection_resource
self.log = log
@public
@contextmanager
def get_connection(
self, raw_conn: bool = True
) -> Iterator[Union[SqlDbConnection, snowflake.connector.SnowflakeConnection]]:
"""Gets a connection to Snowflake as a context manager.
If using the execute_query, execute_queries, or load_table_from_local_parquet methods,
you do not need to create a connection using this context manager.
Args:
raw_conn (bool): If using the sqlalchemy connector, you can set raw_conn to True to create a raw
connection. Defaults to True.
Examples:
.. code-block:: python
@op(
required_resource_keys={"snowflake"}
)
def get_query_status(query_id):
with context.resources.snowflake.get_connection() as conn:
# conn is a Snowflake Connection object or a SQLAlchemy Connection if
# sqlalchemy is specified as the connector in the Snowflake Resource config
return conn.get_query_status(query_id)
"""
with self.snowflake_connection_resource.get_connection(raw_conn=raw_conn) as conn:
yield conn
@public
def execute_query(
self,
sql: str,
parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
fetch_results: bool = False,
use_pandas_result: bool = False,
):
"""Execute a query in Snowflake.
Args:
sql (str): the query to be executed
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the result of the query. Defaults to False. If True
and use_pandas_result is also True, results will be returned as a Pandas DataFrame.
use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The result of the query if fetch_results or use_pandas_result is True, otherwise returns None
Examples:
.. code-block:: python
@op
def drop_database(snowflake: SnowflakeResource):
snowflake.execute_query(
"DROP DATABASE IF EXISTS MY_DATABASE"
)
"""
check.str_param(sql, "sql")
check.opt_inst_param(parameters, "parameters", (list, dict))
check.bool_param(fetch_results, "fetch_results")
if not fetch_results and use_pandas_result:
check.failed("If use_pandas_result is True, fetch_results must also be True.")
with self.get_connection() as conn:
with closing(conn.cursor()) as cursor:
if sys.version_info[0] < 3:
sql = sql.encode("utf-8")
self.log.info("Executing query: " + sql)
parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
cursor.execute(sql, parameters)
if use_pandas_result:
return cursor.fetch_pandas_all()
if fetch_results:
return cursor.fetchall()
@public
def execute_queries(
self,
sql_queries: Sequence[str],
parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
fetch_results: bool = False,
use_pandas_result: bool = False,
) -> Optional[Sequence[Any]]:
"""Execute multiple queries in Snowflake.
Args:
sql_queries (str): List of queries to be executed in series
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True
and use_pandas_result is also True, results will be returned as Pandas DataFrames.
use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The results of the queries as a list if fetch_results or use_pandas_result is True,
otherwise returns None
Examples:
.. code-block:: python
@op
def create_fresh_database(snowflake: SnowflakeResource):
queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]
snowflake.execute_queries(
sql_queries=queries
)
"""
check.sequence_param(sql_queries, "sql_queries", of_type=str)
check.opt_inst_param(parameters, "parameters", (list, dict))
check.bool_param(fetch_results, "fetch_results")
if not fetch_results and use_pandas_result:
check.failed("If use_pandas_result is True, fetch_results must also be True.")
results: List[Any] = []
with self.get_connection() as conn:
with closing(conn.cursor()) as cursor:
for raw_sql in sql_queries:
sql = raw_sql.encode("utf-8") if sys.version_info[0] < 3 else raw_sql
self.log.info("Executing query: " + sql)
parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
cursor.execute(sql, parameters)
if use_pandas_result:
results.append(cursor.fetch_pandas_all())
elif fetch_results:
results.append(cursor.fetchall())
return results if len(results) > 0 else None
@public
def load_table_from_local_parquet(self, src: str, table: str):
"""Stores the content of a parquet file to a Snowflake table.
Args:
src (str): the name of the file to store in Snowflake
table (str): the name of the table to store the data. If the table does not exist, it will
be created. Otherwise the contents of the table will be replaced with the data in src
Examples:
.. code-block:: python
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
@op
def write_parquet_file(snowflake: SnowflakeResource):
df = pd.DataFrame({"one": [1, 2, 3], "ten": [11, 12, 13]})
table = pa.Table.from_pandas(df)
pq.write_table(table, "example.parquet")
snowflake.load_table_from_local_parquet(
src="example.parquet",
table="MY_TABLE"
)
"""
check.str_param(src, "src")
check.str_param(table, "table")
sql_queries = [
f"CREATE OR REPLACE TABLE {table} ( data VARIANT DEFAULT NULL);",
"CREATE OR REPLACE FILE FORMAT parquet_format TYPE = 'parquet';",
f"PUT {src} @%{table};",
f"COPY INTO {table} FROM @%{table} FILE_FORMAT = (FORMAT_NAME = 'parquet_format');",
]
self.execute_queries(sql_queries)
| (config: Mapping[str, str], log, snowflake_connection_resource: dagster_snowflake.resources.SnowflakeResource) |
4,367 | dagster_snowflake.resources | __init__ | null | def __init__(
self, config: Mapping[str, str], log, snowflake_connection_resource: SnowflakeResource
):
self.snowflake_connection_resource = snowflake_connection_resource
self.log = log
| (self, config: Mapping[str, str], log, snowflake_connection_resource: dagster_snowflake.resources.SnowflakeResource) |
4,368 | dagster_snowflake.resources | execute_queries | Execute multiple queries in Snowflake.
Args:
sql_queries (str): List of queries to be executed in series
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True
and use_pandas_result is also True, results will be returned as Pandas DataFrames.
use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The results of the queries as a list if fetch_results or use_pandas_result is True,
otherwise returns None
Examples:
.. code-block:: python
@op
def create_fresh_database(snowflake: SnowflakeResource):
queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]
snowflake.execute_queries(
sql_queries=queries
)
| @public
def execute_queries(
self,
sql_queries: Sequence[str],
parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
fetch_results: bool = False,
use_pandas_result: bool = False,
) -> Optional[Sequence[Any]]:
"""Execute multiple queries in Snowflake.
Args:
sql_queries (str): List of queries to be executed in series
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to every query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the results of the queries as a list. Defaults to False. If True
and use_pandas_result is also True, results will be returned as Pandas DataFrames.
use_pandas_result (bool): If True, will return the results of the queries as a list of a Pandas DataFrames.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The results of the queries as a list if fetch_results or use_pandas_result is True,
otherwise returns None
Examples:
.. code-block:: python
@op
def create_fresh_database(snowflake: SnowflakeResource):
queries = ["DROP DATABASE IF EXISTS MY_DATABASE", "CREATE DATABASE MY_DATABASE"]
snowflake.execute_queries(
sql_queries=queries
)
"""
check.sequence_param(sql_queries, "sql_queries", of_type=str)
check.opt_inst_param(parameters, "parameters", (list, dict))
check.bool_param(fetch_results, "fetch_results")
if not fetch_results and use_pandas_result:
check.failed("If use_pandas_result is True, fetch_results must also be True.")
results: List[Any] = []
with self.get_connection() as conn:
with closing(conn.cursor()) as cursor:
for raw_sql in sql_queries:
sql = raw_sql.encode("utf-8") if sys.version_info[0] < 3 else raw_sql
self.log.info("Executing query: " + sql)
parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
cursor.execute(sql, parameters)
if use_pandas_result:
results.append(cursor.fetch_pandas_all())
elif fetch_results:
results.append(cursor.fetchall())
return results if len(results) > 0 else None
| (self, sql_queries: Sequence[str], parameters: Union[Sequence[Any], Mapping[Any, Any], NoneType] = None, fetch_results: bool = False, use_pandas_result: bool = False) -> Optional[Sequence[Any]] |
4,369 | dagster_snowflake.resources | execute_query | Execute a query in Snowflake.
Args:
sql (str): the query to be executed
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the result of the query. Defaults to False. If True
and use_pandas_result is also True, results will be returned as a Pandas DataFrame.
use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The result of the query if fetch_results or use_pandas_result is True, otherwise returns None
Examples:
.. code-block:: python
@op
def drop_database(snowflake: SnowflakeResource):
snowflake.execute_query(
"DROP DATABASE IF EXISTS MY_DATABASE"
)
| @public
def execute_query(
self,
sql: str,
parameters: Optional[Union[Sequence[Any], Mapping[Any, Any]]] = None,
fetch_results: bool = False,
use_pandas_result: bool = False,
):
"""Execute a query in Snowflake.
Args:
sql (str): the query to be executed
parameters (Optional[Union[Sequence[Any], Mapping[Any, Any]]]): Parameters to be passed to the query. See the
`Snowflake documentation <https://docs.snowflake.com/en/user-guide/python-connector-example.html#binding-data>`__
for more information.
fetch_results (bool): If True, will return the result of the query. Defaults to False. If True
and use_pandas_result is also True, results will be returned as a Pandas DataFrame.
use_pandas_result (bool): If True, will return the result of the query as a Pandas DataFrame.
Defaults to False. If fetch_results is False and use_pandas_result is True, an error will be
raised.
Returns:
The result of the query if fetch_results or use_pandas_result is True, otherwise returns None
Examples:
.. code-block:: python
@op
def drop_database(snowflake: SnowflakeResource):
snowflake.execute_query(
"DROP DATABASE IF EXISTS MY_DATABASE"
)
"""
check.str_param(sql, "sql")
check.opt_inst_param(parameters, "parameters", (list, dict))
check.bool_param(fetch_results, "fetch_results")
if not fetch_results and use_pandas_result:
check.failed("If use_pandas_result is True, fetch_results must also be True.")
with self.get_connection() as conn:
with closing(conn.cursor()) as cursor:
if sys.version_info[0] < 3:
sql = sql.encode("utf-8")
self.log.info("Executing query: " + sql)
parameters = dict(parameters) if isinstance(parameters, Mapping) else parameters
cursor.execute(sql, parameters)
if use_pandas_result:
return cursor.fetch_pandas_all()
if fetch_results:
return cursor.fetchall()
| (self, sql: str, parameters: Union[Sequence[Any], Mapping[Any, Any], NoneType] = None, fetch_results: bool = False, use_pandas_result: bool = False) |