<SYSTEM_TASK:>
Poll an authorization until it is in a state other than pending or processing.
<END_TASK>
<USER_TASK:>
Description:
def poll_until_valid(authzr, clock, client, timeout=300.0):
"""
Poll an authorization until it is in a state other than pending or
processing.
:param ~acme.messages.AuthorizationResource authzr: The authorization to
complete.
:param clock: The ``IReactorTime`` implementation to use; usually the
reactor, when not testing.
:param .Client client: The ACME client.
:param float timeout: Maximum time to poll in seconds, before giving up.
:raises txacme.client.AuthorizationFailed: if the authorization is no
longer in the pending, processing, or valid states.
:raises: ``twisted.internet.defer.CancelledError`` if the authorization was
still in pending or processing state when the timeout was reached.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
:return: A deferred firing when the authorization has completed/failed; if
the authorization is valid, the authorization resource will be
returned.
""" |
def repoll(result):
authzr, retry_after = result
if authzr.body.status in {STATUS_PENDING, STATUS_PROCESSING}:
return (
deferLater(clock, retry_after, lambda: None)
.addCallback(lambda _: client.poll(authzr))
.addCallback(repoll)
)
if authzr.body.status != STATUS_VALID:
raise AuthorizationFailed(authzr)
return authzr
def cancel_timeout(result):
if timeout_call.active():
timeout_call.cancel()
return result
d = client.poll(authzr).addCallback(repoll)
timeout_call = clock.callLater(timeout, d.cancel)
d.addBoth(cancel_timeout)
return d |
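For illustration, a minimal usage sketch (assuming a txacme ``Client`` named ``client`` and an ``authzr`` obtained earlier, e.g. from ``request_challenges``; the names are illustrative):

from twisted.internet import reactor

def report(a):
    print('final status: %s' % a.body.status)

d = poll_until_valid(authzr, reactor, client, timeout=120.0)
d.addCallback(report)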
<SYSTEM_TASK:>
Construct a client from an ACME directory at a given URL.
<END_TASK>
<USER_TASK:>
Description:
def from_url(cls, reactor, url, key, alg=RS256, jws_client=None):
"""
Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`]
""" |
action = LOG_ACME_CONSUME_DIRECTORY(
url=url, key_type=key.typ, alg=alg.name)
with action.context():
check_directory_url_type(url)
jws_client = _default_client(jws_client, reactor, key, alg)
return (
DeferredContext(jws_client.get(url.asText()))
.addCallback(json_content)
.addCallback(messages.Directory.from_json)
.addCallback(
tap(lambda d: action.add_success_fields(directory=d)))
.addCallback(cls, reactor, key, jws_client)
.addActionFinish()) |
<SYSTEM_TASK:>
Create a new registration with the ACME server.
<END_TASK>
<USER_TASK:>
Description:
def register(self, new_reg=None):
"""
Create a new registration with the ACME server.
:param ~acme.messages.NewRegistration new_reg: The registration message
to use, or ``None`` to construct one.
:return: The registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
""" |
if new_reg is None:
new_reg = messages.NewRegistration()
action = LOG_ACME_REGISTER(registration=new_reg)
with action.context():
return (
DeferredContext(
self.update_registration(
new_reg, uri=self.directory[new_reg]))
.addErrback(self._maybe_registered, new_reg)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish()) |
<SYSTEM_TASK:>
If the registration already exists, we should just load it.
<END_TASK>
<USER_TASK:>
Description:
def _maybe_registered(self, failure, new_reg):
"""
If the registration already exists, we should just load it.
""" |
failure.trap(ServerError)
response = failure.value.response
if response.code == http.CONFLICT:
reg = new_reg.update(
resource=messages.UpdateRegistration.resource_type)
uri = self._maybe_location(response)
return self.update_registration(reg, uri=uri)
return failure |
<SYSTEM_TASK:>
Accept the terms-of-service for a registration.
<END_TASK>
<USER_TASK:>
Description:
def agree_to_tos(self, regr):
"""
Accept the terms-of-service for a registration.
:param ~acme.messages.RegistrationResource regr: The registration to
update.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
""" |
return self.update_registration(
regr.update(
body=regr.body.update(
agreement=regr.terms_of_service))) |
<SYSTEM_TASK:>
Submit a registration to the server to update it.
<END_TASK>
<USER_TASK:>
Description:
def update_registration(self, regr, uri=None):
"""
Submit a registration to the server to update it.
:param ~acme.messages.RegistrationResource regr: The registration to
update. Can be a :class:`~acme.messages.NewRegistration` instead,
in order to create a new registration.
:param str uri: The URL to submit to. Must be
specified if a :class:`~acme.messages.NewRegistration` is provided.
:return: The updated registration resource.
:rtype: Deferred[`~acme.messages.RegistrationResource`]
""" |
if uri is None:
uri = regr.uri
if isinstance(regr, messages.RegistrationResource):
message = messages.UpdateRegistration(**dict(regr.body))
else:
message = regr
action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
with action.context():
return (
DeferredContext(self._client.post(uri, message))
.addCallback(self._parse_regr_response, uri=uri)
.addCallback(self._check_regr, regr)
.addCallback(
tap(lambda r: action.add_success_fields(registration=r)))
.addActionFinish()) |
<SYSTEM_TASK:>
Parse a registration response from the server.
<END_TASK>
<USER_TASK:>
Description:
def _parse_regr_response(self, response, uri=None, new_authzr_uri=None,
terms_of_service=None):
"""
Parse a registration response from the server.
""" |
links = _parse_header_links(response)
if u'terms-of-service' in links:
terms_of_service = links[u'terms-of-service'][u'url']
if u'next' in links:
new_authzr_uri = links[u'next'][u'url']
if new_authzr_uri is None:
raise errors.ClientError('"next" link missing')
return (
response.json()
.addCallback(
lambda body:
messages.RegistrationResource(
body=messages.Registration.from_json(body),
uri=self._maybe_location(response, uri=uri),
new_authzr_uri=new_authzr_uri,
terms_of_service=terms_of_service))
) |
<SYSTEM_TASK:>
Check that a registration response contains the registration we were expecting.
<END_TASK>
<USER_TASK:>
Description:
def _check_regr(self, regr, new_reg):
"""
Check that a registration response contains the registration we were
expecting.
""" |
body = getattr(new_reg, 'body', new_reg)
for k, v in body.items():
if k == 'resource' or not v:
continue
if regr.body[k] != v:
raise errors.UnexpectedUpdate(regr)
if regr.body.key != self.key.public_key():
raise errors.UnexpectedUpdate(regr)
return regr |
<SYSTEM_TASK:>
Create a new authorization.
<END_TASK>
<USER_TASK:>
Description:
def request_challenges(self, identifier):
"""
Create a new authorization.
:param ~acme.messages.Identifier identifier: The identifier to
authorize.
:return: The new authorization resource.
:rtype: Deferred[`~acme.messages.AuthorizationResource`]
""" |
action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
with action.context():
message = messages.NewAuthorization(identifier=identifier)
return (
DeferredContext(
self._client.post(self.directory[message], message))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_authorization)
.addCallback(self._check_authorization, identifier)
.addCallback(
tap(lambda a: action.add_success_fields(authorization=a)))
.addActionFinish()) |
<SYSTEM_TASK:>
Ensure we got the expected response code.
<END_TASK>
<USER_TASK:>
Description:
def _expect_response(cls, response, code):
"""
Ensure we got the expected response code.
""" |
if response.code != code:
raise errors.ClientError(
'Expected {!r} response but got {!r}'.format(
code, response.code))
return response |
<SYSTEM_TASK:>
Check that the authorization we got is the one we expected.
<END_TASK>
<USER_TASK:>
Description:
def _check_authorization(cls, authzr, identifier):
"""
Check that the authorization we got is the one we expected.
""" |
if authzr.body.identifier != identifier:
raise errors.UnexpectedUpdate(authzr)
return authzr |
<SYSTEM_TASK:>
Respond to an authorization challenge.
<END_TASK>
<USER_TASK:>
Description:
def answer_challenge(self, challenge_body, response):
"""
Respond to an authorization challenge.
:param ~acme.messages.ChallengeBody challenge_body: The challenge being
responded to.
:param ~acme.challenges.ChallengeResponse response: The response to the
challenge.
:return: The updated challenge resource.
:rtype: Deferred[`~acme.messages.ChallengeResource`]
""" |
action = LOG_ACME_ANSWER_CHALLENGE(
challenge_body=challenge_body, response=response)
with action.context():
return (
DeferredContext(
self._client.post(challenge_body.uri, response))
.addCallback(self._parse_challenge)
.addCallback(self._check_challenge, challenge_body)
.addCallback(
tap(lambda c:
action.add_success_fields(challenge_resource=c)))
.addActionFinish()) |
<SYSTEM_TASK:>
Check that the challenge resource we got is the one we expected.
<END_TASK>
<USER_TASK:>
Description:
def _check_challenge(cls, challenge, challenge_body):
"""
Check that the challenge resource we got is the one we expected.
""" |
if challenge.uri != challenge_body.uri:
raise errors.UnexpectedUpdate(challenge.uri)
return challenge |
<SYSTEM_TASK:>
Parse the Retry-After value from a response.
<END_TASK>
<USER_TASK:>
Description:
def retry_after(cls, response, default=5, _now=time.time):
"""
Parse the Retry-After value from a response.
""" |
val = response.headers.getRawHeaders(b'retry-after', [default])[0]
try:
return int(val)
except ValueError:
return http.stringToDatetime(val) - _now() |
<SYSTEM_TASK:>
Request a certificate.
<END_TASK>
<USER_TASK:>
Description:
def request_issuance(self, csr):
"""
Request a certificate.
Authorizations should have already been completed for all of the names
requested in the CSR.
Note that unlike `acme.client.Client.request_issuance`, the certificate
resource will have the body data as raw bytes.
.. seealso:: `txacme.util.csr_for_names`
.. todo:: Delayed issuance is not currently supported, the server must
issue the requested certificate immediately.
:param csr: A certificate request message: normally
`txacme.messages.CertificateRequest` or
`acme.messages.CertificateRequest`.
:rtype: Deferred[`acme.messages.CertificateResource`]
:return: The issued certificate.
""" |
action = LOG_ACME_REQUEST_CERTIFICATE()
with action.context():
return (
DeferredContext(
self._client.post(
self.directory[csr], csr,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._expect_response, http.CREATED)
.addCallback(self._parse_certificate)
.addActionFinish()) |
<SYSTEM_TASK:>
Parse a response containing a certificate resource.
<END_TASK>
<USER_TASK:>
Description:
def _parse_certificate(cls, response):
"""
Parse a response containing a certificate resource.
""" |
links = _parse_header_links(response)
try:
cert_chain_uri = links[u'up'][u'url']
except KeyError:
cert_chain_uri = None
return (
response.content()
.addCallback(
lambda body: messages.CertificateResource(
uri=cls._maybe_location(response),
cert_chain_uri=cert_chain_uri,
body=body))
) |
<SYSTEM_TASK:>
Fetch the intermediary chain for a certificate.
<END_TASK>
<USER_TASK:>
Description:
def fetch_chain(self, certr, max_length=10):
"""
Fetch the intermediary chain for a certificate.
:param acme.messages.CertificateResource certr: The certificate to
fetch the chain for.
:param int max_length: The maximum length of the chain that will be
fetched.
:rtype: Deferred[List[`acme.messages.CertificateResource`]]
:return: The issuer certificate chain, ordered with the trust anchor
last.
""" |
action = LOG_ACME_FETCH_CHAIN()
with action.context():
if certr.cert_chain_uri is None:
return succeed([])
elif max_length < 1:
raise errors.ClientError('chain too long')
return (
DeferredContext(
self._client.get(
certr.cert_chain_uri,
content_type=DER_CONTENT_TYPE,
headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))
.addCallback(self._parse_certificate)
.addCallback(
lambda issuer:
self.fetch_chain(issuer, max_length=max_length - 1)
.addCallback(lambda chain: [issuer] + chain))
.addActionFinish()) |
<SYSTEM_TASK:>
Check response content and its type.
<END_TASK>
<USER_TASK:>
Description:
def _check_response(cls, response, content_type=JSON_CONTENT_TYPE):
"""
Check response content and its type.
.. note::
Unlike :mod:`acme.client`, checking is strict.
:param bytes content_type: Expected Content-Type response header. If
the response Content-Type does not match, :exc:`ClientError` is
raised.
:raises .ServerError: If server response body carries HTTP Problem
(draft-ietf-appsawg-http-problem-00).
:raises ~acme.errors.ClientError: In case of other networking errors.
""" |
def _got_failure(f):
f.trap(ValueError)
return None
def _got_json(jobj):
if 400 <= response.code < 600:
if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None:
raise ServerError(
messages.Error.from_json(jobj), response)
else:
# response is not JSON object
raise errors.ClientError(response)
elif response_ct != content_type:
raise errors.ClientError(
'Unexpected response Content-Type: {0!r}'.format(
response_ct))
elif content_type == JSON_CONTENT_TYPE and jobj is None:
raise errors.ClientError(response)
return response
response_ct = response.headers.getRawHeaders(
b'Content-Type', [None])[0]
action = LOG_JWS_CHECK_RESPONSE(
expected_content_type=content_type,
response_content_type=response_ct)
with action.context():
# TODO: response.json() is called twice, once here, and
# once in _get and _post clients
return (
DeferredContext(response.json())
.addErrback(_got_failure)
.addCallback(_got_json)
.addActionFinish()) |
<SYSTEM_TASK:>
Send HEAD request without checking the response.
<END_TASK>
<USER_TASK:>
Description:
def head(self, url, *args, **kwargs):
"""
Send HEAD request without checking the response.
Note that ``_check_response`` is not called, as there will be no
response body to check.
:param str url: The URL to make the request to.
""" |
with LOG_JWS_HEAD().context():
return DeferredContext(
self._send_request(u'HEAD', url, *args, **kwargs)
).addActionFinish() |
<SYSTEM_TASK:>
Send GET request and check response.
<END_TASK>
<USER_TASK:>
Description:
def get(self, url, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
Send GET request and check response.
:param str method: The HTTP method to use.
:param str url: The URL to make the request to.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
:return: Deferred firing with the checked HTTP response.
""" |
with LOG_JWS_GET().context():
return (
DeferredContext(self._send_request(u'GET', url, **kwargs))
.addCallback(self._check_response, content_type=content_type)
.addActionFinish()) |
<SYSTEM_TASK:>
Store a nonce from a response we received.
<END_TASK>
<USER_TASK:>
Description:
def _add_nonce(self, response):
"""
Store a nonce from a response we received.
:param twisted.web.iweb.IResponse response: The HTTP response.
:return: The response, unmodified.
""" |
nonce = response.headers.getRawHeaders(
REPLAY_NONCE_HEADER, [None])[0]
with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action:
if nonce is None:
raise errors.MissingNonce(response)
else:
try:
decoded_nonce = Header._fields['nonce'].decode(
nonce.decode('ascii')
)
action.add_success_fields(nonce=decoded_nonce)
except DeserializationError as error:
raise errors.BadNonce(nonce, error)
self._nonces.add(decoded_nonce)
return response |
<SYSTEM_TASK:>
Get a nonce to use in a request, removing it from the nonces on hand.
<END_TASK>
<USER_TASK:>
Description:
def _get_nonce(self, url):
"""
Get a nonce to use in a request, removing it from the nonces on hand.
""" |
action = LOG_JWS_GET_NONCE()
if len(self._nonces) > 0:
with action:
nonce = self._nonces.pop()
action.add_success_fields(nonce=nonce)
return succeed(nonce)
else:
with action.context():
return (
DeferredContext(self.head(url))
.addCallback(self._add_nonce)
.addCallback(lambda _: self._nonces.pop())
.addCallback(tap(
lambda nonce: action.add_success_fields(nonce=nonce)))
.addActionFinish()) |
<SYSTEM_TASK:>
POST an object and check the response.
<END_TASK>
<USER_TASK:>
Description:
def _post(self, url, obj, content_type, **kwargs):
"""
POST an object and check the response.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
""" |
with LOG_JWS_POST().context():
headers = kwargs.setdefault('headers', Headers())
headers.setRawHeaders(b'content-type', [JSON_CONTENT_TYPE])
return (
DeferredContext(self._get_nonce(url))
.addCallback(self._wrap_in_jws, obj)
.addCallback(
lambda data: self._send_request(
u'POST', url, data=data, **kwargs))
.addCallback(self._add_nonce)
.addCallback(self._check_response, content_type=content_type)
.addActionFinish()) |
<SYSTEM_TASK:>
POST an object and check the response. Retry once if a badNonce error is received.
<END_TASK>
<USER_TASK:>
Description:
def post(self, url, obj, content_type=JSON_CONTENT_TYPE, **kwargs):
"""
POST an object and check the response. Retry once if a badNonce error
is received.
:param str url: The URL to request.
:param ~josepy.interfaces.JSONDeSerializable obj: The serializable
payload of the request.
:param bytes content_type: The expected content type of the response.
By default, JSON.
:raises txacme.client.ServerError: If server response body carries HTTP
Problem (draft-ietf-appsawg-http-problem-00).
:raises acme.errors.ClientError: In case of other protocol errors.
""" |
def retry_bad_nonce(f):
f.trap(ServerError)
# The current RFC draft defines the namespace as
# urn:ietf:params:acme:error:<code>, but earlier drafts (and some
# current implementations) use urn:acme:error:<code> instead. We
# don't really care about the namespace here, just the error code.
if f.value.message.typ.split(':')[-1] == 'badNonce':
# If one nonce is bad, others likely are too. Let's clear them
# and re-add the one we just got.
self._nonces.clear()
self._add_nonce(f.value.response)
return self._post(url, obj, content_type, **kwargs)
return f
return (
self._post(url, obj, content_type, **kwargs)
.addErrback(retry_bad_nonce)) |
<SYSTEM_TASK:>
Run a task in a worker, delivering the result as a ``Deferred`` in the reactor thread.
<END_TASK>
<USER_TASK:>
Description:
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
"""
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
""" |
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred |
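A toy illustration with a synchronous worker, where both ``deliver`` and ``worker.do`` simply invoke their argument immediately (names are illustrative):

class ImmediateWorker(object):
    # Toy worker: runs the submitted work synchronously.
    def do(self, work):
        work()

results = []
d = _defer_to_worker(lambda f: f(), ImmediateWorker(), lambda x, y: x + y, 2, 3)
d.addCallback(results.append)  # results == [5] once the deferred fires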
<SYSTEM_TASK:>
Split the zone portion off from a DNS label.
<END_TASK>
<USER_TASK:>
Description:
def _split_zone(server_name, zone_name):
"""
Split the zone portion off from a DNS label.
:param str server_name: The full DNS label.
:param str zone_name: The zone name suffix.
""" |
server_name = server_name.rstrip(u'.')
zone_name = zone_name.rstrip(u'.')
if not (server_name == zone_name or
server_name.endswith(u'.' + zone_name)):
raise NotInZone(server_name=server_name, zone_name=zone_name)
return server_name[:-len(zone_name)].rstrip(u'.') |
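A few illustrative inputs and outputs:

_split_zone(u'foo.bar.example.com', u'example.com')  # -> u'foo.bar'
_split_zone(u'example.com.', u'example.com')         # -> u''
_split_zone(u'example.org', u'example.com')          # raises NotInZone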
<SYSTEM_TASK:>
Get the validation value for a challenge response.
<END_TASK>
<USER_TASK:>
Description:
def _validation(response):
"""
Get the validation value for a challenge response.
""" |
h = hashlib.sha256(response.key_authorization.encode("utf-8"))
return b64encode(h.digest()).decode() |
<SYSTEM_TASK:>
Load the client key from a directory, creating it if it does not exist.
<END_TASK>
<USER_TASK:>
Description:
def load_or_create_client_key(pem_path):
"""
Load the client key from a directory, creating it if it does not exist.
.. note:: The client key that will be created will be a 2048-bit RSA key.
:type pem_path: ``twisted.python.filepath.FilePath``
:param pem_path: The certificate directory
to use, as with the endpoint.
""" |
acme_key_file = pem_path.asTextMode().child(u'client.key')
if acme_key_file.exists():
key = serialization.load_pem_private_key(
acme_key_file.getContent(),
password=None,
backend=default_backend())
else:
key = generate_private_key(u'rsa')
acme_key_file.setContent(
key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()))
return JWKRSA(key=key) |
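A usage sketch (the path is illustrative); the first call generates ``client.key`` under the directory, subsequent calls load the same key:

from twisted.python.filepath import FilePath

jwk = load_or_create_client_key(FilePath('/srv/certs'))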
<SYSTEM_TASK:>
Parse a txacme endpoint description.
<END_TASK>
<USER_TASK:>
Description:
def _parse(reactor, directory, pemdir, *args, **kwargs):
"""
Parse a txacme endpoint description.
:param reactor: The Twisted reactor.
:param directory: ``twisted.python.url.URL`` for the ACME directory to use
for issuing certs.
:param str pemdir: The path to the certificate directory to use.
""" |
def colon_join(items):
return ':'.join([item.replace(':', '\\:') for item in items])
sub = colon_join(list(args) + ['='.join(item) for item in kwargs.items()])
pem_path = FilePath(pemdir).asTextMode()
acme_key = load_or_create_client_key(pem_path)
return AutoTLSEndpoint(
reactor=reactor,
directory=directory,
client_creator=partial(Client.from_url, key=acme_key, alg=RS256),
cert_store=DirectoryStore(pem_path),
cert_mapping=HostDirectoryMap(pem_path),
sub_endpoint=serverFromString(reactor, sub)) |
<SYSTEM_TASK:>
Generator which continually reads ``f`` to the next instance of ``delimiter``.
<END_TASK>
<USER_TASK:>
Description:
def lazyread(f, delimiter):
"""
Generator which continually reads ``f`` to the next instance
of ``delimiter``.
This allows you to do batch processing on the contents of ``f`` without
loading the entire file into memory.
:param f: Any file-like object which has a ``.read()`` method.
:param delimiter: Delimiter on which to split up the file.
""" |
# Get an empty string to start with. We need to make sure that if the
# file is opened in binary mode, we're using byte strings, and similar
# for Unicode. Otherwise trying to update the running string will
# hit a TypeError.
try:
running = f.read(0)
except Exception as e:
# The boto3 APIs don't let you read zero bytes from an S3 object, but
# they always return bytestrings, so in this case we know what to
# start with.
if e.__class__.__name__ == 'IncompleteReadError':
running = b''
else:
raise
while True:
new_data = f.read(1024)
# When a call to read() returns nothing, we're at the end of the file.
if not new_data:
yield running
return
# Otherwise, update the running stream and look for instances of
# the delimiter. Remember we might have read more than one delimiter
# since the last time we checked
running += new_data
while delimiter in running:
curr, running = running.split(delimiter, 1)
yield curr + delimiter |
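A usage sketch that processes a large binary file chunk by chunk without loading it into memory (``handle`` is a hypothetical per-chunk callback):

with open('big.log', 'rb') as f:
    for chunk in lazyread(f, delimiter=b'\n'):
        handle(chunk)  # each chunk ends with b'\n', except possibly the last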
<SYSTEM_TASK:>
Generate a random private key using sensible parameters.
<END_TASK>
<USER_TASK:>
Description:
def generate_private_key(key_type):
"""
Generate a random private key using sensible parameters.
:param str key_type: The type of key to generate. One of: ``rsa``.
""" |
if key_type == u'rsa':
return rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend())
raise ValueError(key_type) |
<SYSTEM_TASK:>
"Tap" a Deferred callback chain with a function whose return value is
<END_TASK>
<USER_TASK:>
Description:
def tap(f):
"""
"Tap" a Deferred callback chain with a function whose return value is
ignored.
""" |
@wraps(f)
def _cb(res, *a, **kw):
d = maybeDeferred(f, res, *a, **kw)
d.addCallback(lambda ignored: res)
return d
return _cb |
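A usage sketch in a Deferred chain; the tapped callback sees the result but does not replace it:

from twisted.internet.defer import succeed

seen = []
d = succeed('some result')
d.addCallback(tap(seen.append))     # side effect only; seen == ['some result']
d.addCallback(lambda r: r.upper())  # still receives 'some result'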
<SYSTEM_TASK:>
Generate a certificate signing request for the given names and private key.
<END_TASK>
<USER_TASK:>
Description:
def csr_for_names(names, key):
"""
Generate a certificate signing request for the given names and private key.
.. seealso:: `acme.client.Client.request_issuance`
.. seealso:: `generate_private_key`
:param List[str] names: One or more names (subjectAltName) for which to
request a certificate.
:param key: A Cryptography private key object.
:rtype: `cryptography.x509.CertificateSigningRequest`
:return: The certificate request message.
""" |
if len(names) == 0:
raise ValueError('Must have at least one name')
if len(names[0]) > 64:
common_name = u'san.too.long.invalid'
else:
common_name = names[0]
return (
x509.CertificateSigningRequestBuilder()
.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, common_name)]))
.add_extension(
x509.SubjectAlternativeName(list(map(x509.DNSName, names))),
critical=False)
.sign(key, hashes.SHA256(), default_backend())) |
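A sketch combining this with ``generate_private_key`` above and PEM-encoding the result (assuming ``serialization`` is ``cryptography.hazmat.primitives.serialization``):

key = generate_private_key(u'rsa')
csr = csr_for_names([u'example.com', u'www.example.com'], key)
pem = csr.public_bytes(serialization.Encoding.PEM)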
<SYSTEM_TASK:>
async wrapper is required to avoid await calls raising a SyntaxError
<END_TASK>
<USER_TASK:>
Description:
def _wrap_parse(code, filename):
"""
async wrapper is required to avoid await calls raising a SyntaxError
""" |
code = 'async def wrapper():\n' + indent(code, ' ')
return ast.parse(code, filename=filename).body[0].body[0].value |
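For illustration, a top-level ``await`` parses cleanly once wrapped, and the helper returns the AST node of the first wrapped expression (requires Python 3.5+; ``fetch`` and ``url`` need not exist, as the code is only parsed):

import ast

node = _wrap_parse('await fetch(url)', '<example>')
assert isinstance(node, ast.Await)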
<SYSTEM_TASK:>
Clear all indexes in the solr core
<END_TASK>
<USER_TASK:>
Description:
def clear_solr(self, catalog="hypermap"):
"""Clear all indexes in the solr core""" |
solr_url = "{0}/solr/{1}".format(SEARCH_URL, catalog)
solr = pysolr.Solr(solr_url, timeout=60)
solr.delete(q='*:*')
LOGGER.debug('Solr core cleared') |
<SYSTEM_TASK:>
Create a service from an endpoint if it does not already exist.
<END_TASK>
<USER_TASK:>
Description:
def create_service_from_endpoint(endpoint, service_type, title=None, abstract=None, catalog=None):
"""
Create a service from an endpoint if it does not already exist.
""" |
from models import Service
if Service.objects.filter(url=endpoint, catalog=catalog).count() == 0:
# check if endpoint is valid
request = requests.get(endpoint)
if request.status_code == 200:
LOGGER.debug('Creating a %s service for endpoint=%s catalog=%s' % (service_type, endpoint, catalog))
service = Service(
type=service_type, url=endpoint, title=title, abstract=abstract,
csw_type='service', catalog=catalog
)
service.save()
return service
else:
LOGGER.warning('This endpoint is invalid, status code is %s' % request.status_code)
else:
LOGGER.warning('A service for this endpoint %s in catalog %s already exists' % (endpoint, catalog))
return None |
<SYSTEM_TASK:>
Parse the service and the folder of services from a URL.
<END_TASK>
<USER_TASK:>
Description:
def service_url_parse(url):
"""
Parse the service and the folder of services from a URL.
""" |
endpoint = get_sanitized_endpoint(url)
url_split_list = url.split(endpoint + '/')
if len(url_split_list) > 1:
    url_split_list = url_split_list[1].split('/')
else:
    raise Exception('Wrong url parsed')
# Remove unnecessary items from list of the split url.
parsed_url = [s for s in url_split_list if '?' not in s if 'Server' not in s]
return parsed_url |
<SYSTEM_TASK:>
OWSLib wrapper function to perform version negotiation against owslib.wms.WebMapService
<END_TASK>
<USER_TASK:>
Description:
def get_wms_version_negotiate(url, timeout=10):
"""
OWSLib wrapper function to perform version negotiation against owslib.wms.WebMapService
""" |
try:
LOGGER.debug('Trying a WMS 1.3.0 GetCapabilities request')
return WebMapService(url, version='1.3.0', timeout=timeout)
except Exception as err:
LOGGER.warning('WMS 1.3.0 support not found: %s', err)
LOGGER.debug('Trying a WMS 1.1.1 GetCapabilities request instead')
return WebMapService(url, version='1.1.1', timeout=timeout) |
<SYSTEM_TASK:>
Sanitize an endpoint by removing unneeded parameters
<END_TASK>
<USER_TASK:>
Description:
def get_sanitized_endpoint(url):
"""
Sanitize an endpoint by removing unneeded parameters
""" |
# sanitize esri
sanitized_url = url.rstrip()
esri_string = '/rest/services'
if esri_string in url:
match = re.search(esri_string, sanitized_url)
sanitized_url = url[0:(match.start(0)+len(esri_string))]
return sanitized_url |
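For example, an ESRI REST URL is cut back to its service root, while other URLs are only stripped of trailing whitespace:

get_sanitized_endpoint('http://gis.example.com/arcgis/rest/services/Roads/MapServer?f=json')
# -> 'http://gis.example.com/arcgis/rest/services'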
<SYSTEM_TASK:>
Returns a date in a valid Solr format from a string.
<END_TASK>
<USER_TASK:>
Description:
def get_solr_date(pydate, is_negative):
"""
Returns a date in a valid Solr format from a string.
""" |
# check if date is valid and then set it to solr format YYYY-MM-DDThh:mm:ssZ
try:
if isinstance(pydate, datetime.datetime):
solr_date = '%sZ' % pydate.isoformat()[0:19]
if is_negative:
LOGGER.debug('%s This layer has a negative date' % solr_date)
solr_date = '-%s' % solr_date
return solr_date
else:
return None
except Exception as e:
LOGGER.error(e, exc_info=True)
return None |
<SYSTEM_TASK:>
Returns a custom date representation. A date can be detected or from metadata.
<END_TASK>
<USER_TASK:>
Description:
def get_date(layer):
"""
Returns a custom date representation. A date can be detected or from metadata.
It can be a range or a simple date in isoformat.
""" |
date = None
sign = '+'
date_type = 1
layer_dates = layer.get_layer_dates()
# we index the first date!
if layer_dates:
sign = layer_dates[0][0]
date = layer_dates[0][1]
date_type = layer_dates[0][2]
if date is None:
date = layer.created
# layer date > 2300 is invalid for sure
# TODO put this logic in date miner
if date.year > 2300:
date = None
if date_type == 0:
date_type = "Detected"
if date_type == 1:
date_type = "From Metadata"
return get_solr_date(date, (sign == '-')), date_type |
<SYSTEM_TASK:>
Detect whether a URL is a service type that HHypermap supports
<END_TASK>
<USER_TASK:>
Description:
def detect_metadata_url_scheme(url):
"""detect whether a url is a Service type that HHypermap supports""" |
scheme = None
url_lower = url.lower()
if any(x in url_lower for x in ['wms', 'service=wms']):
scheme = 'OGC:WMS'
if any(x in url_lower for x in ['wmts', 'service=wmts']):
scheme = 'OGC:WMTS'
elif all(x in url for x in ['/MapServer', 'f=json']):
scheme = 'ESRI:ArcGIS:MapServer'
elif all(x in url for x in ['/ImageServer', 'f=json']):
scheme = 'ESRI:ArcGIS:ImageServer'
return scheme |
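A couple of illustrative calls:

detect_metadata_url_scheme('http://example.com/geoserver/ows?SERVICE=WMS')
# -> 'OGC:WMS'
detect_metadata_url_scheme('http://gis.example.com/arcgis/rest/services/X/MapServer?f=json')
# -> 'ESRI:ArcGIS:MapServer'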
<SYSTEM_TASK:>
Serialize a check_set for raphael
<END_TASK>
<USER_TASK:>
Description:
def serialize_checks(check_set):
"""
Serialize a check_set for raphael
""" |
check_set_list = []
for check in check_set.all()[:25]:
check_set_list.append(
{
'datetime': check.checked_datetime.isoformat(),
'value': check.response_time,
'success': 1 if check.success else 0
}
)
return check_set_list |
<SYSTEM_TASK:>
A page with the number of services and layers, faceted on domains.
<END_TASK>
<USER_TASK:>
Description:
def domains(request):
"""
A page with the number of services and layers, faceted on domains.
""" |
url = ''
query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
if settings.SEARCH_TYPE == 'elasticsearch':
url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
if settings.SEARCH_TYPE == 'solr':
url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
LOGGER.debug(url)
response = urllib2.urlopen(url)
data = response.read().replace('\n', '')
# stats
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template('aggregator/index.html')
context = RequestContext(request, {
'data': data,
'layers_count': layers_count,
'services_count': services_count,
})
return HttpResponse(template.render(context)) |
<SYSTEM_TASK:>
A page that lets the admin run global tasks.
<END_TASK>
<USER_TASK:>
Description:
def tasks_runner(request):
"""
A page that lets the admin run global tasks.
""" |
# server info
cached_layers_number = 0
cached_layers = cache.get('layers')
if cached_layers:
cached_layers_number = len(cached_layers)
cached_deleted_layers_number = 0
cached_deleted_layers = cache.get('deleted_layers')
if cached_deleted_layers:
cached_deleted_layers_number = len(cached_deleted_layers)
# task actions
if request.method == 'POST':
if 'check_all' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
check_all_services()
else:
check_all_services.delay()
if 'index_all' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_all_layers()
else:
index_all_layers.delay()
if 'index_cached' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_cached_layers()
else:
index_cached_layers.delay()
if 'drop_cached' in request.POST:
cache.set('layers', None)
cache.set('deleted_layers', None)
if 'clear_index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
clear_index()
else:
clear_index.delay()
if 'remove_index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
unindex_layers_with_issues()
else:
unindex_layers_with_issues.delay()
return render(
request,
'aggregator/tasks_runner.html', {
'cached_layers_number': cached_layers_number,
'cached_deleted_layers_number': cached_deleted_layers_number,
}
) |
<SYSTEM_TASK:>
Get Layer with matching catalog and uuid
<END_TASK>
<USER_TASK:>
Description:
def layer_mapproxy(request, catalog_slug, layer_uuid, path_info):
"""
Get Layer with matching catalog and uuid
""" |
layer = get_object_or_404(Layer,
uuid=layer_uuid,
catalog__slug=catalog_slug)
# for WorldMap layers we need to use the url of the layer
if layer.service.type == 'Hypermap:WorldMap':
layer.service.url = layer.url
# Set up a mapproxy app for this particular layer
mp, yaml_config = get_mapproxy(layer)
query = request.META['QUERY_STRING']
if len(query) > 0:
path_info = path_info + '?' + query
params = {}
headers = {
'X-Script-Name': '/registry/{0}/layer/{1}/map/'.format(catalog_slug, layer.id),
'X-Forwarded-Host': request.META['HTTP_HOST'],
'HTTP_HOST': request.META['HTTP_HOST'],
'SERVER_NAME': request.META['SERVER_NAME'],
}
if path_info == '/config':
response = HttpResponse(yaml_config, content_type='text/plain')
return response
# Get a response from MapProxy as if it was running standalone.
mp_response = mp.get(path_info, params, headers)
# Create a Django response from the MapProxy WSGI response.
response = HttpResponse(mp_response.body, status=mp_response.status_int)
for header, value in mp_response.headers.iteritems():
response[header] = value
return response |
<SYSTEM_TASK:>
Parses a date string to a date object.
<END_TASK>
<USER_TASK:>
Description:
def parse_datetime(date_str):
"""
Parses a date string to a date object.
For BCE dates, only the year part is supported.
""" |
is_common_era = True
date_str_parts = date_str.split("-")
if date_str_parts and date_str_parts[0] == '':
is_common_era = False
# for now, only support BCE years
# assume the datetime comes complete, but
# when it comes only the year, add the missing datetime info:
if len(date_str_parts) == 2:
date_str = date_str + "-01-01T00:00:00Z"
parsed_datetime = {
'is_common_era': is_common_era,
'parsed_datetime': None
}
if is_common_era:
if date_str == '*':
return parsed_datetime # open ended.
default = datetime.datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0,
day=1, month=1
)
parsed_datetime['parsed_datetime'] = parse(date_str, default=default)
return parsed_datetime
parsed_datetime['parsed_datetime'] = date_str
return parsed_datetime |
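Illustrative results (the CE value is parsed by ``dateutil``; the BCE value is returned as a string):

parse_datetime('2001-05-09T00:00:00Z')
# -> {'is_common_era': True,
#     'parsed_datetime': datetime.datetime(2001, 5, 9, 0, 0, tzinfo=tzutc())}
parse_datetime('-500')
# -> {'is_common_era': False, 'parsed_datetime': '-500-01-01T00:00:00Z'}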
<SYSTEM_TASK:>
Query by list of identifiers
<END_TASK>
<USER_TASK:>
Description:
def query_ids(self, ids):
"""
Query by list of identifiers
""" |
results = self._get_repo_filter(Layer.objects).filter(uuid__in=ids).all()
if len(results) == 0: # try services
results = self._get_repo_filter(Service.objects).filter(uuid__in=ids).all()
return results |
<SYSTEM_TASK:>
Query by property domain values
<END_TASK>
<USER_TASK:>
Description:
def query_domain(self, domain, typenames, domainquerytype='list', count=False):
"""
Query by property domain values
""" |
objects = self._get_repo_filter(Layer.objects)
if domainquerytype == 'range':
return [tuple(objects.aggregate(Min(domain), Max(domain)).values())]
else:
if count:
return [(d[domain], d['%s__count' % domain])
for d in objects.values(domain).annotate(Count(domain))]
else:
return objects.values_list(domain).distinct() |
<SYSTEM_TASK:>
Query records from underlying repository
<END_TASK>
<USER_TASK:>
Description:
def query(self, constraint, sortby=None, typenames=None, maxrecords=10, startposition=0):
"""
Query records from underlying repository
""" |
# run the raw query and get total
# we want to exclude layers which are not valid, as it is done in the search engine
if 'where' in constraint: # GetRecords with constraint
query = self._get_repo_filter(Layer.objects).filter(
is_valid=True).extra(where=[constraint['where']], params=constraint['values'])
else: # GetRecords sans constraint
query = self._get_repo_filter(Layer.objects).filter(is_valid=True)
total = query.count()
# apply sorting, limit and offset
if sortby is not None:
if 'spatial' in sortby and sortby['spatial']: # spatial sort
desc = False
if sortby['order'] == 'DESC':
desc = True
query = query.all()
return [str(total),
sorted(query,
key=lambda x: float(util.get_geometry_area(getattr(x, sortby['propertyname']))),
reverse=desc,
)[startposition:startposition+int(maxrecords)]]
else:
if sortby['order'] == 'DESC':
pname = '-%s' % sortby['propertyname']
else:
pname = sortby['propertyname']
return [str(total),
query.order_by(pname)[startposition:startposition+int(maxrecords)]]
else: # no sort
return [str(total), query.all()[startposition:startposition+int(maxrecords)]] |
<SYSTEM_TASK:>
Insert a record into the repository
<END_TASK>
<USER_TASK:>
Description:
def insert(self, resourcetype, source, insert_date=None):
"""
Insert a record into the repository
""" |
caller = inspect.stack()[1][3]
if caller == 'transaction': # insert of Layer
hhclass = 'Layer'
source = resourcetype
resourcetype = resourcetype.csw_schema
else: # insert of service
hhclass = 'Service'
if resourcetype not in HYPERMAP_SERVICE_TYPES.keys():
raise RuntimeError('Unsupported Service Type')
return self._insert_or_update(resourcetype, source, mode='insert', hhclass=hhclass) |
<SYSTEM_TASK:>
Insert or update a record in the repository
<END_TASK>
<USER_TASK:>
Description:
def _insert_or_update(self, resourcetype, source, mode='insert', hhclass='Service'):
"""
Insert or update a record in the repository
""" |
keywords = []
if self.filter is not None:
catalog = Catalog.objects.get(id=int(self.filter.split()[-1]))
try:
if hhclass == 'Layer':
# TODO: better way of figuring out duplicates
match = Layer.objects.filter(name=source.name,
title=source.title,
abstract=source.abstract,
is_monitored=False)
matches = match.all()
if matches:
if mode == 'insert':
raise RuntimeError('HHypermap error: Layer %d \'%s\' already exists' % (
matches[0].id, source.title))
elif mode == 'update':
match.update(
name=source.name,
title=source.title,
abstract=source.abstract,
is_monitored=False,
xml=source.xml,
wkt_geometry=source.wkt_geometry,
anytext=util.get_anytext([source.title, source.abstract, source.keywords_csv])
)
service = get_service(source.xml)
res, keywords = create_layer_from_metadata_xml(resourcetype, source.xml,
monitor=False, service=service,
catalog=catalog)
res.save()
LOGGER.debug('Indexing layer with id %s on search engine' % res.uuid)
index_layer(res.id, use_cache=True)
else:
if resourcetype == 'http://www.opengis.net/cat/csw/2.0.2':
res = Endpoint(url=source, catalog=catalog)
else:
res = Service(type=HYPERMAP_SERVICE_TYPES[resourcetype], url=source, catalog=catalog)
res.save()
if keywords:
for kw in keywords:
res.keywords.add(kw)
except Exception as err:
raise RuntimeError('HHypermap error: %s' % err)
# return a list of ids that were inserted or updated
ids = []
if hhclass == 'Layer':
ids.append({'identifier': res.uuid, 'title': res.title})
else:
if resourcetype == 'http://www.opengis.net/cat/csw/2.0.2':
for res in Endpoint.objects.filter(url=source).all():
ids.append({'identifier': res.uuid, 'title': res.url})
else:
for res in Service.objects.filter(url=source).all():
ids.append({'identifier': res.uuid, 'title': res.title})
return ids |
<SYSTEM_TASK:>
Delete a record from the repository
<END_TASK>
<USER_TASK:>
Description:
def delete(self, constraint):
"""
Delete a record from the repository
""" |
results = self._get_repo_filter(Service.objects).extra(where=[constraint['where']],
params=constraint['values']).all()
deleted = len(results)
results.delete()
return deleted |
<SYSTEM_TASK:>
This registers any model class to be follow-able.
<END_TASK>
<USER_TASK:>
Description:
def register(model, field_name=None, related_name=None, lookup_method_name='get_follows'):
"""
This registers any model class to be follow-able.
""" |
if model in registry:
return
registry.append(model)
if not field_name:
field_name = 'target_%s' % model._meta.module_name
if not related_name:
related_name = 'follow_%s' % model._meta.module_name
field = ForeignKey(model, related_name=related_name, null=True,
blank=True, db_index=True)
field.contribute_to_class(Follow, field_name)
setattr(model, lookup_method_name, get_followers_for_object)
model_map[model] = [related_name, field_name] |
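A registration sketch for a hypothetical model; with the defaults this adds a ``target_band`` foreign key on ``Follow``, a ``follow_band`` related name, and a ``get_follows`` lookup method on the model (``Band`` and ``some_band`` are illustrative):

class Band(models.Model):  # hypothetical model
    name = models.CharField(max_length=100)

register(Band)
followers = some_band.get_follows()  # Follow objects targeting this instance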
<SYSTEM_TASK:>
Make a user follow an object
<END_TASK>
<USER_TASK:>
Description:
def follow(user, obj):
""" Make a user follow an object """ |
follow, created = Follow.objects.get_or_create(user, obj)
return follow |
<SYSTEM_TASK:>
Create a new follow link between a user and an object of a registered model type.
<END_TASK>
<USER_TASK:>
Description:
def create(self, user, obj, **kwargs):
"""
Create a new follow link between a user and an object
of a registered model type.
""" |
follow = Follow(user=user)
follow.target = obj
follow.save()
return follow |
<SYSTEM_TASK:>
Almost the same as `FollowManager.objects.create` - behaves the same as the normal `get_or_create` methods in Django.
<END_TASK>
<USER_TASK:>
Description:
def get_or_create(self, user, obj, **kwargs):
"""
Almost the same as `FollowManager.objects.create` - behaves the same
as the normal `get_or_create` methods in Django, though.
Returns a tuple with the `Follow` and either `True` or `False`.
""" |
if not self.is_following(user, obj):
return self.create(user, obj, **kwargs), True
return self.get_follows(obj).get(user=user), False |
<SYSTEM_TASK:>
Returns all the followers of a model, an object or a queryset.
<END_TASK>
<USER_TASK:>
Description:
def get_follows(self, model_or_obj_or_qs):
"""
Returns all the followers of a model, an object or a queryset.
""" |
fname = self.fname(model_or_obj_or_qs)
if isinstance(model_or_obj_or_qs, QuerySet):
return self.filter(**{'%s__in' % fname: model_or_obj_or_qs})
if inspect.isclass(model_or_obj_or_qs):
return self.exclude(**{fname:None})
return self.filter(**{fname:model_or_obj_or_qs}) |
<SYSTEM_TASK:>
create_event_regressors creates the part of the design matrix corresponding to one event type.
<END_TASK>
<USER_TASK:>
Description:
def create_event_regressors(self, event_times_indices, covariates = None, durations = None):
"""create_event_regressors creates the part of the design matrix corresponding to one event type.
:param event_times_indices: indices in the resampled data, on which the events occurred.
:type event_times_indices: numpy array, (nr_events)
:param covariates: covariates belonging to this event type. If None, covariates with a value of 1 for all events are created and used internally.
:type covariates: numpy array, (nr_events)
:param durations: durations belonging to this event type. If None, durations with a value of 1 sample for all events are created and used internally.
:type durations: numpy array, (nr_events)
:returns: This event type's part of the design matrix.
""" |
# check covariates
if covariates is None:
    covariates = np.ones(event_times_indices.shape)
# check/create durations, convert from seconds to samples time, and compute mean duration for this event type.
if durations is None:
    durations = np.ones(event_times_indices.shape)
else:
durations = np.round(durations*self.deconvolution_frequency).astype(int)
mean_duration = np.mean(durations)
# set up output array
regressors_for_event = np.zeros((self.deconvolution_interval_size, self.resampled_signal_size))
# fill up output array by looping over events.
for cov, eti, dur in zip(covariates, event_times_indices, durations):
valid = True
if eti < 0:
self.logger.debug('deconv samples are starting before the data starts.')
valid = False
if eti+self.deconvolution_interval_size > self.resampled_signal_size:
self.logger.debug('deconv samples are continuing after the data stops.')
valid = False
if eti > self.resampled_signal_size:
self.logger.debug('event falls outside of the scope of the data.')
valid = False
if valid: # only incorporate sensible events.
# calculate the design matrix that belongs to this event.
this_event_design_matrix = (np.diag(np.ones(self.deconvolution_interval_size)) * cov)
over_durations_dm = np.copy(this_event_design_matrix)
if dur > 1: # if this event has a non-unity duration, duplicate the stick regressors in the time direction
for d in np.arange(1,dur):
over_durations_dm[d:] += this_event_design_matrix[:-d]
# and correct for differences in durations between different regressor types.
over_durations_dm /= mean_duration
# add the designmatrix for this event to the full design matrix for this type of event.
regressors_for_event[:,eti:int(eti+self.deconvolution_interval_size)] += over_durations_dm
return regressors_for_event |
<SYSTEM_TASK:>
regress performs linear least squares regression of the designmatrix on the data.
<END_TASK>
<USER_TASK:>
Description:
def regress(self, method = 'lstsq'):
"""regress performs linear least squares regression of the designmatrix on the data.
:param method: method, or backend to be used for the regression analysis.
:type method: string, one of ['lstsq', 'sm_ols']
:returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
""" |
if method == 'lstsq':
self.betas, residuals_sum, rank, s = LA.lstsq(self.design_matrix.T, self.resampled_signal.T)
self.residuals = self.resampled_signal - self.predict_from_design_matrix(self.design_matrix)
elif method == 'sm_ols':
import statsmodels.api as sm
assert self.resampled_signal.shape[0] == 1, \
'signal input into statsmodels OLS cannot contain multiple signals at once, present shape %s' % str(self.resampled_signal.shape)
model = sm.OLS(np.squeeze(self.resampled_signal),self.design_matrix.T)
results = model.fit()
# make betas and residuals that are compatible with the LA.lstsq type.
self.betas = np.array(results.params).reshape((self.design_matrix.shape[0], self.resampled_signal.shape[0]))
self.residuals = np.array(results.resid).reshape(self.resampled_signal.shape)
self.logger.debug('performed %s regression on %s design_matrix and %s signal' % (method, str(self.design_matrix.shape), str(self.resampled_signal.shape))) |
<SYSTEM_TASK:>
predict_from_design_matrix predicts signals given a design matrix.
<END_TASK>
<USER_TASK:>
Description:
def predict_from_design_matrix(self, design_matrix):
"""predict_from_design_matrix predicts signals given a design matrix.
:param design_matrix: design matrix from which to predict a signal.
:type design_matrix: numpy array, (nr_samples x betas.shape)
:returns: predicted signal(s)
:rtype: numpy array (nr_signals x nr_samples)
""" |
# check if we have already run the regression - which is necessary
assert hasattr(self, 'betas'), 'no betas found, please run regression before prediction'
assert design_matrix.shape[0] == self.betas.shape[0], \
'designmatrix needs to have the same number of regressors as the betas already calculated'
# betas = np.copy(self.betas.T, order="F", dtype = np.float32)
# f_design_matrix = np.copy(design_matrix, order = "F", dtype = np.float32)
prediction = np.dot(self.betas.astype(np.float32).T, design_matrix.astype(np.float32))
return prediction |
<SYSTEM_TASK:>
Remove all checks from a service.
<END_TASK>
<USER_TASK:>
Description:
def remove_service_checks(self, service_id):
"""
Remove all checks from a service.
""" |
from hypermap.aggregator.models import Service
service = Service.objects.get(id=service_id)
service.check_set.all().delete()
layer_to_process = service.layer_set.all()
for layer in layer_to_process:
layer.check_set.all().delete() |
<SYSTEM_TASK:>
Index a service in the search engine.
<END_TASK>
<USER_TASK:>
Description:
def index_service(self, service_id):
"""
Index a service in the search engine.
""" |
from hypermap.aggregator.models import Service
service = Service.objects.get(id=service_id)
if not service.is_valid:
LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id)
return
LOGGER.debug('Indexing service %s' % service.id)
layer_to_process = service.layer_set.all()
for layer in layer_to_process:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True)
else:
index_layer(layer.id) |
<SYSTEM_TASK:>
Index a layer in the search backend.
<END_TASK>
<USER_TASK:>
Description:
def index_layer(self, layer_id, use_cache=False):
"""
Index a layer in the search backend.
If use_cache is set, append the layer to the cached list; if it isn't, send the transaction right away.
The cache needs memcached to be available.
""" |
from hypermap.aggregator.models import Layer
layer = Layer.objects.get(id=layer_id)
if not layer.is_valid:
LOGGER.debug('Not indexing or removing layer with id %s in search engine as it is not valid' % layer.id)
unindex_layer(layer.id, use_cache)
return
if layer.was_deleted:
LOGGER.debug('Not indexing or removing layer with id %s in search engine as was_deleted is true' % layer.id)
unindex_layer(layer.id, use_cache)
return
# 1. if we use cache
if use_cache:
LOGGER.debug('Caching layer with id %s for syncing with search engine' % layer.id)
layers = cache.get('layers')
if layers is None:
layers = set([layer.id])
else:
layers.add(layer.id)
cache.set('layers', layers)
return
# 2. if we don't use cache
# TODO: Make this function more DRY
# by abstracting the common bits.
if SEARCH_TYPE == 'solr':
from hypermap.aggregator.solr import SolrHypermap
LOGGER.debug('Syncing layer %s to solr' % layer.name)
solrobject = SolrHypermap()
success, message = solrobject.layer_to_solr(layer)
# update the error message if using celery
if not settings.REGISTRY_SKIP_CELERY:
if not success:
self.update_state(
state=states.FAILURE,
meta=message
)
raise Ignore()
elif SEARCH_TYPE == 'elasticsearch':
from hypermap.aggregator.elasticsearch_client import ESHypermap
LOGGER.debug('Syncing layer %s to es' % layer.name)
esobject = ESHypermap()
success, message = esobject.layer_to_es(layer)
# update the error message if using celery
if not settings.REGISTRY_SKIP_CELERY:
if not success:
self.update_state(
state=states.FAILURE,
meta=message
)
raise Ignore() |
<SYSTEM_TASK:>
Remove the index for layers in the search backend that are linked to an issue.
<END_TASK>
<USER_TASK:>
Description:
def unindex_layers_with_issues(self, use_cache=False):
"""
Remove the index for layers in the search backend that are linked to an issue.
""" |
from hypermap.aggregator.models import Issue, Layer, Service
from django.contrib.contenttypes.models import ContentType
layer_type = ContentType.objects.get_for_model(Layer)
service_type = ContentType.objects.get_for_model(Service)
for issue in Issue.objects.filter(content_type__pk=layer_type.id):
unindex_layer(issue.content_object.id, use_cache)
for issue in Issue.objects.filter(content_type__pk=service_type.id):
for layer in issue.content_object.layer_set.all():
unindex_layer(layer.id, use_cache) |
<SYSTEM_TASK:>
Remove the index for a layer in the search backend.
<END_TASK>
<USER_TASK:>
Description:
def unindex_layer(self, layer_id, use_cache=False):
"""
Remove the index for a layer in the search backend.
If use_cache is set, append the layer to the list of removed layers; if it isn't, send the transaction right away.
""" |
from hypermap.aggregator.models import Layer
layer = Layer.objects.get(id=layer_id)
if use_cache:
LOGGER.debug('Caching layer with id %s for being removed from search engine' % layer.id)
deleted_layers = cache.get('deleted_layers')
if deleted_layers is None:
deleted_layers = set([layer.id])
else:
deleted_layers.add(layer.id)
cache.set('deleted_layers', deleted_layers)
return
if SEARCH_TYPE == 'solr':
from hypermap.aggregator.solr import SolrHypermap
LOGGER.debug('Removing layer %s from solr' % layer.id)
try:
solrobject = SolrHypermap()
solrobject.remove_layer(layer.uuid)
except Exception:
LOGGER.error('Layer NOT correctly removed from Solr')
elif SEARCH_TYPE == 'elasticsearch':
# TODO implement me
pass |
<SYSTEM_TASK:>
Index all layers in the search engine.
<END_TASK>
<USER_TASK:>
Description:
def index_all_layers(self):
"""
Index all layers in the search engine.
""" |
from hypermap.aggregator.models import Layer
if not settings.REGISTRY_SKIP_CELERY:
layers_cache = set(Layer.objects.filter(is_valid=True).values_list('id', flat=True))
deleted_layers_cache = set(Layer.objects.filter(is_valid=False).values_list('id', flat=True))
cache.set('layers', layers_cache)
cache.set('deleted_layers', deleted_layers_cache)
else:
for layer in Layer.objects.all():
index_layer(layer.id) |
<SYSTEM_TASK:>
Convenience function to create bag of words for anytext property
<END_TASK>
<USER_TASK:>
Description:
def gen_anytext(*args):
"""
Convenience function to create bag of words for anytext property
""" |
bag = []
for term in args:
if term is not None:
if isinstance(term, list):
for term2 in term:
if term2 is not None:
bag.append(term2)
else:
bag.append(term)
return ' '.join(bag) |
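For example:

gen_anytext('Roads', None, ['transport', 'osm'])
# -> 'Roads transport osm'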
<SYSTEM_TASK:>
Used to process the lines of the endpoint list.
<END_TASK>
<USER_TASK:>
Description:
def endpointlist_post_save(instance, *args, **kwargs):
"""
Used to process the lines of the endpoint list.
""" |
with open(instance.upload.file.name, mode='rb') as f:
lines = f.readlines()
for url in lines:
if len(url) > 255:
LOGGER.debug('Skipping this endpoint, as it is more than 255 characters: %s' % url)
else:
if Endpoint.objects.filter(url=url, catalog=instance.catalog).count() == 0:
endpoint = Endpoint(url=url, endpoint_list=instance)
endpoint.catalog = instance.catalog
endpoint.save()
if not settings.REGISTRY_SKIP_CELERY:
update_endpoints.delay(instance.id)
else:
update_endpoints(instance.id) |
<SYSTEM_TASK:>
Used to do a full layer check when saving it.
<END_TASK>
<USER_TASK:>
Description:
def layer_post_save(instance, *args, **kwargs):
"""
Used to do a full layer check when saving it.
""" |
if instance.is_monitored and instance.service.is_monitored: # index and monitor
if not settings.REGISTRY_SKIP_CELERY:
check_layer.delay(instance.id)
else:
check_layer(instance.id)
else: # just index
index_layer(instance.id) |
<SYSTEM_TASK:>
Returns the Hypermap endpoint for a layer.
<END_TASK>
<USER_TASK:>
Description:
def get_url_endpoint(self):
"""
Returns the Hypermap endpoint for a layer.
This endpoint will be the WMTS MapProxy endpoint; only for WorldMap (WM) layers do we use the original endpoint.
""" |
endpoint = self.url
if self.type not in ('Hypermap:WorldMap',):
endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % (
self.catalog.slug,
self.id
)
return endpoint |
<SYSTEM_TASK:>
Check for availability of a layer and provide run metrics.
<END_TASK>
<USER_TASK:>
Description:
def check_available(self):
"""
Check for availability of a layer and provide run metrics.
""" |
success = True
start_time = datetime.datetime.utcnow()
message = ''
LOGGER.debug('Checking layer id %s' % self.id)
signals.post_save.disconnect(layer_post_save, sender=Layer)
try:
self.update_thumbnail()
except ValueError as err:
# caused by update_thumbnail()
# self.href is empty in arcserver.ExportMap
if str(err).startswith("unknown url type:"):
LOGGER.debug('Thumbnail can not be updated: %s' % str(err))
except Exception as err:
message = str(err)
success = False
signals.post_save.connect(layer_post_save, sender=Layer)
end_time = datetime.datetime.utcnow()
delta = end_time - start_time
response_time = '%s.%s' % (delta.seconds, delta.microseconds)
check = Check(
content_object=self,
success=success,
response_time=response_time,
message=message
)
check.save()
LOGGER.debug('Layer checked in %s seconds, status is %s' % (response_time, success))
return success, message |
<SYSTEM_TASK:>
Grabs input from the user and saves it as their trytravis target repo.
<END_TASK>
<USER_TASK:>
Description:
def _input_github_repo(url=None):
""" Grabs input from the user and saves
it as their trytravis target repo """ |
if url is None:
url = user_input('Input the URL of the GitHub repository '
'to use as a `trytravis` repository: ')
url = url.strip()
http_match = _HTTPS_REGEX.match(url)
ssh_match = _SSH_REGEX.match(url)
if not http_match and not ssh_match:
raise RuntimeError('That URL doesn\'t look like a valid '
'GitHub URL. We expect something '
'of the form: `https://github.com/[USERNAME]/'
'[REPOSITORY]` or `ssh://[email protected]/'
'[USERNAME]/[REPOSITORY]')
# Make sure that the user actually made a new repository on GitHub.
if http_match:
_, name = http_match.groups()
else:
_, name = ssh_match.groups()
if 'trytravis' not in name:
raise RuntimeError('You must have `trytravis` in the name of your '
'repository. This is a security feature to reduce '
'chances of running git push -f on a repository '
'you don\'t mean to.')
# Make sure that the user actually wants to use this repository.
accept = user_input('Remember that `trytravis` will make commits on your '
'behalf to `%s`. Are you sure you wish to use this '
'repository? Type `y` or `yes` to accept: ' % url)
if accept.lower() not in ['y', 'yes']:
raise RuntimeError('Operation aborted by user.')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
with open(os.path.join(config_dir, 'repo'), 'w+') as f:
f.truncate()
f.write(url)
print('Repository saved successfully.') |
<SYSTEM_TASK:>
Loads the GitHub repository from the user's config.
<END_TASK>
<USER_TASK:>
Description:
def _load_github_repo():
""" Loads the GitHub repository from the users config. """ |
if 'TRAVIS' in os.environ:
raise RuntimeError('Detected that we are running in Travis. '
'Stopping to prevent infinite loops.')
try:
with open(os.path.join(config_dir, 'repo'), 'r') as f:
return f.read()
except (OSError, IOError):
raise RuntimeError('Could not find your repository. '
                           'Have you run `trytravis --repo`?')
<SYSTEM_TASK:>
Temporarily commits local changes and submits them to
<END_TASK>
<USER_TASK:>
Description:
def _submit_changes_to_github_repo(path, url):
""" Temporarily commits local changes and submits them to
the GitHub repository that the user has specified. Then
reverts the changes to the git repository if a commit was
necessary. """ |
try:
repo = git.Repo(path)
except Exception:
raise RuntimeError('Couldn\'t locate a repository at `%s`.' % path)
    committed = False
try:
try:
repo.delete_remote('trytravis')
except Exception:
pass
print('Adding a temporary remote to '
'`%s`...' % url)
remote = repo.create_remote('trytravis', url)
print('Adding all local changes...')
repo.git.add('--all')
try:
print('Committing local changes...')
timestamp = datetime.datetime.now().isoformat()
repo.git.commit(m='trytravis-' + timestamp)
            committed = True
except git.exc.GitCommandError as e:
if 'nothing to commit' in str(e):
                committed = False
else:
raise
commit = repo.head.commit.hexsha
committed_at = repo.head.commit.committed_datetime
print('Pushing to `trytravis` remote...')
remote.push(force=True)
finally:
        if committed:
print('Reverting to old state...')
repo.git.reset('HEAD^')
try:
repo.delete_remote('trytravis')
except Exception:
pass
return commit, committed_at |
<SYSTEM_TASK:>
Waits for a Travis build to appear with the given commit SHA
<END_TASK>
<USER_TASK:>
Description:
def _wait_for_travis_build(url, commit, committed_at):
""" Waits for a Travis build to appear with the given commit SHA """ |
print('Waiting for a Travis build to appear '
'for `%s` after `%s`...' % (commit, committed_at))
import requests
slug = _slug_from_url(url)
start_time = time.time()
build_id = None
while time.time() - start_time < 60:
with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug,
headers=_travis_headers()) as r:
if not r.ok:
raise RuntimeError('Could not reach the Travis API '
'endpoint. Additional information: '
'%s' % str(r.content))
# Search through all commits and builds to find our build.
commit_to_sha = {}
json = r.json()
for travis_commit in sorted(json['commits'],
key=lambda x: x['committed_at']):
travis_committed_at = datetime.datetime.strptime(
travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ'
).replace(tzinfo=utc)
if travis_committed_at < committed_at:
continue
commit_to_sha[travis_commit['id']] = travis_commit['sha']
for build in json['builds']:
if (build['commit_id'] in commit_to_sha and
commit_to_sha[build['commit_id']] == commit):
build_id = build['id']
print('Travis build id: `%d`' % build_id)
print('Travis build URL: `https://travis-ci.org/'
'%s/builds/%d`' % (slug, build_id))
if build_id is not None:
break
time.sleep(3.0)
else:
raise RuntimeError('Timed out while waiting for a Travis build '
'to start. Is Travis configured for `%s`?' % url)
return build_id |
<SYSTEM_TASK:>
Watches and progressively outputs information
<END_TASK>
<USER_TASK:>
Description:
def _watch_travis_build(build_id):
""" Watches and progressively outputs information
about a given Travis build """ |
import requests
try:
        build_size = None  # type: Optional[int]
running = True
while running:
with requests.get('https://api.travis-ci.org/builds/%d' % build_id,
headers=_travis_headers()) as r:
json = r.json()
if build_size is not None:
if build_size > 1:
sys.stdout.write('\r\x1b[%dA' % build_size)
else:
sys.stdout.write('\r')
build_size = len(json['jobs'])
running = False
current_number = 1
for job in json['jobs']: # pragma: no coverage
color, state, is_running = _travis_job_state(job['state'])
if is_running:
running = True
platform = job['config']['os']
if platform == 'osx':
platform = ' osx '
env = job['config'].get('env', '')
sudo = 's' if job['config'].get('sudo', True) else 'c'
lang = job['config'].get('language', 'generic')
padding = ' ' * (len(str(build_size)) -
len(str(current_number)))
number = str(current_number) + padding
current_number += 1
job_display = '#' + ' '.join([number,
state,
platform,
sudo,
lang,
env])
print(color + job_display + colorama.Style.RESET_ALL)
time.sleep(3.0)
except KeyboardInterrupt:
pass |
<SYSTEM_TASK:>
Converts a Travis state into a color, state character,
<END_TASK>
<USER_TASK:>
Description:
def _travis_job_state(state):
""" Converts a Travis state into a state character, color,
and whether it's still running or a stopped state. """ |
if state in [None, 'queued', 'created', 'received']:
return colorama.Fore.YELLOW, '*', True
elif state in ['started', 'running']:
return colorama.Fore.LIGHTYELLOW_EX, '*', True
elif state == 'passed':
return colorama.Fore.LIGHTGREEN_EX, 'P', False
elif state == 'failed':
return colorama.Fore.LIGHTRED_EX, 'X', False
elif state == 'errored':
return colorama.Fore.LIGHTRED_EX, '!', False
elif state == 'canceled':
return colorama.Fore.LIGHTBLACK_EX, 'X', False
else:
raise RuntimeError('unknown state: %s' % str(state)) |
<SYSTEM_TASK:>
Parses a project slug out of either an HTTPS or SSH URL.
<END_TASK>
<USER_TASK:>
Description:
def _slug_from_url(url):
""" Parses a project slug out of either an HTTPS or SSH URL. """ |
http_match = _HTTPS_REGEX.match(url)
ssh_match = _SSH_REGEX.match(url)
if not http_match and not ssh_match:
raise RuntimeError('Could not parse the URL (`%s`) '
'for your repository.' % url)
if http_match:
return '/'.join(http_match.groups())
else:
return '/'.join(ssh_match.groups()) |
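_HTTPS_REGEX and _SSH_REGEX are defined elsewhere in trytravis and the real patterns may differ, but a minimal sketch compatible with the two-group (username, repository) usage above could look like:

import re

# Hypothetical patterns -- each must capture exactly two groups: (username, repository).
_HTTPS_REGEX = re.compile(r'^https://(?:www\.)?github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')
_SSH_REGEX = re.compile(r'^ssh://git@github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')

assert _slug_from_url('https://github.com/user/trytravis-repo') == 'user/trytravis-repo'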
<SYSTEM_TASK:>
Main entry point when the user runs the `trytravis` command.
<END_TASK>
<USER_TASK:>
Description:
def main(argv=None): # pragma: no coverage
""" Main entry point when the user runs the `trytravis` command. """ |
try:
colorama.init()
if argv is None:
argv = sys.argv[1:]
_main(argv)
except RuntimeError as e:
print(colorama.Fore.RED + 'ERROR: ' +
str(e) + colorama.Style.RESET_ALL)
sys.exit(1)
else:
sys.exit(0) |
<SYSTEM_TASK:>
Validate a 4-element string array of finite numeric coordinates.
<END_TASK>
<USER_TASK:>
Description:
def good_coords(coords):
""" passed a string array """ |
if (len(coords) != 4):
return False
    for coord in coords:
try:
num = float(coord)
if (math.isnan(num)):
return False
if (math.isinf(num)):
return False
except ValueError:
return False
return True |
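A quick illustrative check of a bounding-box string array (minx, miny, maxx, maxy):

good_coords(['-180', '-90', '180', '90'])   # True: four finite numeric strings
good_coords(['-180', '-90', '180'])         # False: wrong length
good_coords(['-180', 'nan', '180', '90'])   # False: NaN values are rejected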
<SYSTEM_TASK:>
kill WSGI processes that may be running in development
<END_TASK>
<USER_TASK:>
Description:
def kill_process(procname, scriptname):
"""kill WSGI processes that may be running in development""" |
# from http://stackoverflow.com/a/2940878
import signal
import subprocess
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode().splitlines():
if procname in line and scriptname in line:
pid = int(line.split()[1])
info('Stopping %s %s %d' % (procname, scriptname, pid))
os.kill(pid, signal.SIGKILL) |
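A hedged invocation (the process and script names here are illustrative):

kill_process('python', 'runserver')  # stop a Django dev server started via manage.py runserver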
<SYSTEM_TASK:>
Populate a freshly installed Hypermap instance with basic services.
<END_TASK>
<USER_TASK:>
Description:
def populate_initial_services():
"""
Populate a freshly installed Hypermap instance with basic services.
""" |
services_list = (
(
'Harvard WorldMap',
'Harvard WorldMap open source web geospatial platform',
'Hypermap:WorldMap',
'http://worldmap.harvard.edu'
),
(
'NYPL MapWarper',
'The New York Public Library (NYPL) MapWarper web site',
'Hypermap:WARPER',
'http://maps.nypl.org/warper/maps'
),
(
'Map Warper',
'The MapWarper web site developed, hosted and maintained by Tim Waters',
'Hypermap:WARPER',
'http://mapwarper.net/maps'
),
(
'WorldMap Warp',
'The MapWarper instance part of the Harvard WorldMap project',
'Hypermap:WARPER',
'http://warp.worldmap.harvard.edu/maps'
),
(
'WFP GeoNode',
'World Food Programme GeoNode',
'OGC:WMS',
'http://geonode.wfp.org/geoserver/ows?'
),
(
'NASA EARTHDATA',
'NASA EARTHDATA, powered by EOSDIS',
'OGC:WMTS',
'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml'
),
)
esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services'
LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint)
create_services_from_endpoint(esri_endpoint)
    for title, abstract, service_type, service_url in services_list:
        LOGGER.debug('*** Importing %s' % title)
        service = Service(
            title=title,
            abstract=abstract,
            type=service_type,
            url=service_url
        )
        service.save()
<SYSTEM_TASK:>
For testing purposes
<END_TASK>
<USER_TASK:>
Description:
def main():
"""For testing purpose""" |
tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
hdmi_network = HDMINetwork(tcp_adapter)
hdmi_network.start()
while True:
for d in hdmi_network.devices:
_LOGGER.info("Device: %s", d)
time.sleep(7) |
<SYSTEM_TASK:>
Compute difference in bits between digest1 and digest2
<END_TASK>
<USER_TASK:>
Description:
def compare_hexdigests( digest1, digest2 ):
"""Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different""" |
    # convert each hexdigest into a 32-tuple of unsigned byte values (one per pair of hex characters)
digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
bits = 0
for i in range(32):
bits += POPC[255 & digest1[i] ^ digest2[i]]
return 128 - bits |
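compare_hexdigests relies on POPC, a 256-entry population-count table defined elsewhere in the module; a minimal equivalent sketch, plus a sanity check:

# Assumed POPC table: POPC[n] is the number of 1-bits in the byte n.
POPC = [bin(n).count('1') for n in range(256)]

compare_hexdigests('0' * 64, '0' * 64)  # -> 128: identical digests share all 256 bits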
<SYSTEM_TASK:>
Get accumulator for a transition n between chars a, b, c.
<END_TASK>
<USER_TASK:>
Description:
def tran3(self, a, b, c, n):
"""Get accumulator for a transition n between chars a, b, c.""" |
return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255) |
<SYSTEM_TASK:>
Add data to running digest, increasing the accumulators for 0-8
<END_TASK>
<USER_TASK:>
Description:
def update(self, data):
"""Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars.""" |
for character in data:
if PY3:
ch = character
else:
ch = ord(character)
self.count += 1
# incr accumulators for triplets
if self.lastch[1] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
if self.lastch[2] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
if self.lastch[3] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
# adjust last seen chars
self.lastch = [ch] + self.lastch[:3] |
<SYSTEM_TASK:>
Get digest of data seen thus far as a list of bytes.
<END_TASK>
<USER_TASK:>
Description:
def digest(self):
"""Get digest of data seen thus far as a list of bytes.""" |
total = 0 # number of triplets seen
if self.count == 3: # 3 chars = 1 triplet
total = 1
elif self.count == 4: # 4 chars = 4 triplets
total = 4
elif self.count > 4: # otherwise 8 triplets/char less
total = 8 * self.count - 28 # 28 'missed' during 'ramp-up'
threshold = total / 256 # threshold for accumulators, using the mean
code = [0]*32 # start with all zero bits
for i in range(256): # for all 256 accumulators
if self.acc[i] > threshold: # if it meets the threshold
            code[i >> 3] += 1 << (i & 7)  # set bit i: i >> 3 is i // 8, 1 << (i & 7) is 2 ** (i % 8)
return code[::-1] |
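A hedged helper showing how this 32-byte digest list is conventionally rendered as a 64-character hexdigest (the helper name is made up):

def hexdigest_sketch(nilsimsa_obj):
    # digest() yields 32 ints, one byte each; format each as two hex characters.
    return ''.join('%02x' % byte for byte in nilsimsa_obj.digest())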
<SYSTEM_TASK:>
Update running digest with content of named file.
<END_TASK>
<USER_TASK:>
Description:
def from_file(self, filename):
"""Update running digest with content of named file.""" |
    with open(filename, 'rb') as f:
        while True:
            data = f.read(10480)
            if not data:
                break
            self.update(data)
<SYSTEM_TASK:>
Compute difference in bits between own digest and another.
<END_TASK>
<USER_TASK:>
Description:
def compare(self, otherdigest, ishex=False):
"""Compute difference in bits between own digest and another.
returns -127 to 128; 128 is the same, -127 is different""" |
bits = 0
myd = self.digest()
if ishex:
        # convert each hexdigest into a 32-tuple of unsigned byte values (one per pair of hex characters)
otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)])
for i in range(32):
bits += POPC[255 & myd[i] ^ otherdigest[i]]
return 128 - bits |
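An illustrative similarity check, assuming the enclosing class (call it Nilsimsa) initializes count=0, acc=[0]*256, and lastch=[-1]*4 as the reference implementation does (Python 3 bytes shown):

a = Nilsimsa(); a.update(b'The quick brown fox jumps over the lazy dog')
b = Nilsimsa(); b.update(b'The quick brown fox jumps over the lazy cat')
a.compare(b.digest())  # close to 128: the two texts differ by a single word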
<SYSTEM_TASK:>
JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a string
<END_TASK>
<USER_TASK:>
Description:
def jdout(api_response):
"""
JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a string
instead of directly printing content.
**Parameters:**
- **api_response:** A CloudGenix-attribute extended `requests.Response` object
**Returns:** Pretty-formatted text of the Response body
""" |
try:
# attempt to output the cgx_content. should always be a Dict if it exists.
output = json.dumps(api_response.cgx_content, indent=4)
except (TypeError, ValueError, AttributeError):
# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.
try:
output = json.dumps(api_response, indent=4)
except (TypeError, ValueError, AttributeError):
# Same issue, just raw output the passed data. Let any exceptions happen here.
output = api_response
return output |
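Hedged usage (the endpoint called here is illustrative):

resp = api.get.profile()  # hypothetical SDK call returning the extended Response
print(jdout(resp))        # pretty-printed JSON body, or best-effort raw output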
<SYSTEM_TASK:>
JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response
<END_TASK>
<USER_TASK:>
Description:
def jdout_detailed(api_response, sensitive=False):
"""
JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response
objects for troubleshooting. This function returns a string instead of directly printing content.
**Parameters:**
- **api_response:** A CloudGenix-attribute extended `requests.Response` object
- **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers).
**Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers,
and Response Body.
""" |
try:
# try to be super verbose.
output = "REQUEST: {0} {1}\n".format(api_response.request.method, api_response.request.path_url)
output += "REQUEST HEADERS:\n"
for key, value in api_response.request.headers.items():
# look for sensitive values
if key.lower() in ['cookie'] and not sensitive:
# we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator
cookie_list = value.split('; ')
muted_cookie_list = []
for cookie in cookie_list:
# check if cookie starts with a permutation of AUTH_TOKEN/whitespace.
if cookie.lower().strip().startswith('auth_token='):
# first 11 chars of cookie with whitespace removed + mute string.
newcookie = cookie.strip()[:11] + "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\""
muted_cookie_list.append(newcookie)
else:
muted_cookie_list.append(cookie)
# got list of cookies, muted as needed. recombine.
muted_value = "; ".join(muted_cookie_list)
output += "\t{0}: {1}\n".format(key, muted_value)
elif key.lower() in ['x-auth-token'] and not sensitive:
output += "\t{0}: {1}\n".format(key, "<SENSITIVE - NOT SHOWN BY DEFAULT>")
else:
output += "\t{0}: {1}\n".format(key, value)
# if body not present, output blank.
if not api_response.request.body:
output += "REQUEST BODY:\n{0}\n\n".format({})
else:
try:
                # Attempt to load JSON from string to make it look better.
output += "REQUEST BODY:\n{0}\n\n".format(json.dumps(json.loads(api_response.request.body), indent=4))
except (TypeError, ValueError, AttributeError):
# if pretty call above didn't work, just toss it to jdout to best effort it.
output += "REQUEST BODY:\n{0}\n\n".format(jdout(api_response.request.body))
output += "RESPONSE: {0} {1}\n".format(api_response.status_code, api_response.reason)
output += "RESPONSE HEADERS:\n"
for key, value in api_response.headers.items():
output += "\t{0}: {1}\n".format(key, value)
try:
# look for CGX content first.
output += "RESPONSE DATA:\n{0}".format(json.dumps(api_response.cgx_content, indent=4))
except (TypeError, ValueError, AttributeError):
# look for standard response data.
output += "RESPONSE DATA:\n{0}".format(json.dumps(json.loads(api_response.content), indent=4))
except (TypeError, ValueError, AttributeError, UnicodeDecodeError):
# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.
try:
output = json.dumps(api_response, indent=4)
except (TypeError, ValueError, AttributeError):
# Same issue, just raw output the passed data. Let any exceptions happen here.
output = api_response
return output |
<SYSTEM_TASK:>
Check for a new version of the SDK on API constructor instantiation. If new version found, print
<END_TASK>
<USER_TASK:>
Description:
def notify_for_new_version(self):
"""
Check for a new version of the SDK on API constructor instantiation. If new version found, print
Notification to STDERR.
On failure of this check, fail silently.
**Returns:** No item returned, directly prints notification to `sys.stderr`.
""" |
# broad exception clause, if this fails for any reason just return.
try:
recommend_update = False
update_check_resp = requests.get(self.update_info_url, timeout=3)
web_version = update_check_resp.json()["info"]["version"]
api_logger.debug("RETRIEVED_VERSION: %s", web_version)
available_version = SDK_BUILD_REGEX.search(web_version).groupdict()
current_version = SDK_BUILD_REGEX.search(self.version).groupdict()
        # cast numeric components to int so version comparisons are numeric, not lexicographic
        available_major = int(available_version.get('major'))
        available_minor = int(available_version.get('minor'))
        available_patch = int(available_version.get('patch'))
        available_build = available_version.get('build')
        current_major = int(current_version.get('major'))
        current_minor = int(current_version.get('minor'))
        current_patch = int(current_version.get('patch'))
        current_build = current_version.get('build')
api_logger.debug("AVAILABLE_VERSION: %s", available_version)
api_logger.debug("CURRENT_VERSION: %s", current_version)
        # check for major/minor/patch version differences; do not alert for build-only differences.
if available_major > current_major:
recommend_update = True
elif available_major >= current_major and available_minor > current_minor:
recommend_update = True
elif available_major >= current_major and available_minor >= current_minor and \
available_patch > current_patch:
recommend_update = True
api_logger.debug("NEED_UPDATE: %s", recommend_update)
# notify.
if recommend_update:
sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 "
"months after release of a new version.\n"
"\tLatest Version: {0}\n"
"\tCurrent Version: {1}\n"
"\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this "
"message can be suppressed by instantiating the API with API(update_check=False).\n\n"
"".format(web_version, self.version))
return
except Exception:
# just return and continue.
return |
<SYSTEM_TASK:>
Modify ssl verification settings
<END_TASK>
<USER_TASK:>
Description:
def ssl_verify(self, ssl_verify):
"""
Modify ssl verification settings
**Parameters:**
- ssl_verify:
- True: Verify using builtin BYTE_CA_BUNDLE.
- False: No SSL Verification.
- Str: Full path to a x509 PEM CA File or bundle.
**Returns:** Mutates API object in place, no return.
""" |
self.verify = ssl_verify
# if verify true/false, set ca_verify_file appropriately
if isinstance(self.verify, bool):
if self.verify: # True
if os.name == 'nt':
# Windows does not allow tmpfile access w/out close. Close file then delete it when done.
self._ca_verify_file_handle = temp_ca_bundle(delete=False)
self._ca_verify_file_handle.write(BYTE_CA_BUNDLE)
self._ca_verify_file_handle.flush()
self.ca_verify_filename = self._ca_verify_file_handle.name
self._ca_verify_file_handle.close()
# Other (POSIX/Unix/Linux/OSX)
else:
self._ca_verify_file_handle = temp_ca_bundle()
self._ca_verify_file_handle.write(BYTE_CA_BUNDLE)
self._ca_verify_file_handle.flush()
self.ca_verify_filename = self._ca_verify_file_handle.name
# register cleanup function for temp file.
atexit.register(self._cleanup_ca_temp_file)
else: # False
# disable warnings for SSL certs.
urllib3.disable_warnings()
self.ca_verify_filename = False
else: # Not True/False, assume path to file/dir for Requests
self.ca_verify_filename = self.verify
return |
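Illustrative calls, assuming `api` is an instantiated API object:

api.ssl_verify(True)                       # verify against the built-in BYTE_CA_BUNDLE
api.ssl_verify(False)                      # disable verification (urllib3 warnings muted)
api.ssl_verify('/etc/ssl/certs/corp.pem')  # verify against a custom CA file (path is illustrative)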
<SYSTEM_TASK:>
Modify retry parameters for the SDK's rest call object.
<END_TASK>
<USER_TASK:>
Description:
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None,
method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True,
respect_retry_after_header=True, adapter_url="https://"):
"""
Modify retry parameters for the SDK's rest call object.
Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to
the underlying `requests.Session` object.
Default retry with total=8 and backoff_factor=0.705883:
- Try 1, 0 delay (0 total seconds)
- Try 2, 0 delay (0 total seconds)
- Try 3, 0.705883 delay (0.705883 total seconds)
- Try 4, 1.411766 delay (2.117649 total seconds)
- Try 5, 2.823532 delay (4.941181 total seconds)
- Try 6, 5.647064 delay (10.588245 total seconds)
- Try 7, 11.294128 delay (21.882373 total seconds)
- Try 8, 22.588256 delay (44.470629 total seconds)
- Try 9, 45.176512 delay (89.647141 total seconds)
- Try 10, 90.353024 delay (180.000165 total seconds)
**Parameters:**
- **total:** int, Total number of retries to allow. Takes precedence over other counts.
- **connect:** int, How many connection-related errors to retry on.
- **read:** int, How many times to retry on read errors.
- **redirect:** int, How many redirects to perform. Limit this to avoid infinite redirect loops.
- **status:** int, How many times to retry on bad status codes.
- **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on.
- **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on.
- **backoff_factor:** float, A backoff factor to apply between attempts after the second try.
- **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response.
- **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses.
- **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes.
- **adapter_url:** string, URL match for these retry values (default `https://`)
**Returns:** No return, mutates the session directly
""" |
    # CloudGenix responses with 413/429/502/503/504 are usually recoverable. Use them if no list is specified.
if status_forcelist is None:
status_forcelist = (413, 429, 502, 503, 504)
retry = urllib3.util.retry.Retry(total=total,
connect=connect,
read=read,
redirect=redirect,
status=status,
method_whitelist=method_whitelist,
status_forcelist=status_forcelist,
backoff_factor=backoff_factor,
raise_on_redirect=raise_on_redirect,
raise_on_status=raise_on_status,
respect_retry_after_header=respect_retry_after_header)
adapter = requests.adapters.HTTPAdapter(max_retries=retry)
self._session.mount(adapter_url, adapter)
return |
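The delay table in the docstring follows urllib3's exponential backoff: no delay on the first two tries, then backoff_factor * 2 ** (n - 3) for try n. A small sketch that reproduces the table (assumes urllib3 backoff semantics):

def backoff_schedule(tries=10, backoff_factor=0.705883):
    # Reproduce the documented retry schedule for inspection.
    elapsed = 0.0
    for n in range(1, tries + 1):
        delay = 0.0 if n <= 2 else backoff_factor * 2 ** (n - 3)
        elapsed += delay
        print('Try %d, %.6f delay (%.6f total seconds)' % (n, delay, elapsed))

backoff_schedule()  # matches the ten lines listed in the docstring above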
<SYSTEM_TASK:>
Change the debug level of the API
<END_TASK>
<USER_TASK:>
Description:
def set_debug(self, debuglevel):
"""
Change the debug level of the API.
**Parameters:**
- **debuglevel:** Integer; 0 = WARNING only (default), 1 = INFO, 2 = DEBUG, 3+ = DEBUG including urllib3 loggers.
**Returns:** No item returned.
""" |
if isinstance(debuglevel, int):
self._debuglevel = debuglevel
if self._debuglevel == 1:
logging.basicConfig(level=logging.INFO,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
api_logger.setLevel(logging.INFO)
elif self._debuglevel == 2:
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
requests.cookies.cookielib.debug = True
api_logger.setLevel(logging.DEBUG)
elif self._debuglevel >= 3:
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s")
requests.cookies.cookielib.debug = True
api_logger.setLevel(logging.DEBUG)
urllib3_logger = logging.getLogger("requests.packages.urllib3")
urllib3_logger.setLevel(logging.DEBUG)
urllib3_logger.propagate = True
else:
# Remove all handlers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# set logging level to default
requests.cookies.cookielib.debug = False
api_logger.setLevel(logging.WARNING)
return |
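Illustrative usage:

api.set_debug(2)  # verbose: DEBUG logging plus cookie debugging
api.set_debug(0)  # back to the quiet default (WARNING only)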
<SYSTEM_TASK:>
Call subclasses via function to allow passing parent namespace to subclasses.
<END_TASK>
<USER_TASK:>
Description:
def _subclass_container(self):
"""
Call subclasses via function to allow passing parent namespace to subclasses.
**Returns:** dict with subclass references.
""" |
_parent_class = self
class GetWrapper(Get):
def __init__(self):
self._parent_class = _parent_class
class PostWrapper(Post):
def __init__(self):
self._parent_class = _parent_class
class PutWrapper(Put):
def __init__(self):
self._parent_class = _parent_class
class PatchWrapper(Patch):
def __init__(self):
self._parent_class = _parent_class
class DeleteWrapper(Delete):
def __init__(self):
self._parent_class = _parent_class
class InteractiveWrapper(Interactive):
def __init__(self):
self._parent_class = _parent_class
return {"get": GetWrapper,
"post": PostWrapper,
"put": PutWrapper,
"patch": PatchWrapper,
"delete": DeleteWrapper,
"interactive": InteractiveWrapper} |
<SYSTEM_TASK:>
Function to clean up ca temp file for requests.
<END_TASK>
<USER_TASK:>
Description:
def _cleanup_ca_temp_file(self):
"""
Function to clean up ca temp file for requests.
**Returns:** Removes TEMP ca file, no return
""" |
if os.name == 'nt':
if isinstance(self.ca_verify_filename, (binary_type, text_type)):
# windows requires file to be closed for access. Have to manually remove
os.unlink(self.ca_verify_filename)
else:
# other OS's allow close and delete of file.
self._ca_verify_file_handle.close() |
<SYSTEM_TASK:>
Break auth_token up into its constituent values.
<END_TASK>
<USER_TASK:>
Description:
def parse_auth_token(self, auth_token):
"""
Break auth_token up into its constituent values.
**Parameters:**
- **auth_token:** Auth_token string
**Returns:** dict with Auth Token constituents
""" |
# remove the random security key value from the front of the auth_token
auth_token_cleaned = auth_token.split('-', 1)[1]
# URL Decode the Auth Token
auth_token_decoded = self.url_decode(auth_token_cleaned)
# Create a new dict to hold the response.
auth_dict = {}
# Parse the token
for key_value in auth_token_decoded.split("&"):
key_value_list = key_value.split("=")
# check for valid token parts
if len(key_value_list) == 2 and type(key_value_list[0]) in [text_type, binary_type]:
auth_dict[key_value_list[0]] = key_value_list[1]
# Return the dict of key/values in the token.
return auth_dict |
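A hedged example with a fabricated token (real tokens carry more fields; only the '<random key>-' prefix plus URL-encoded key=value structure matters here, and url_decode is assumed to perform standard URL decoding):

token = 'r4nd0m-region%3Dus-west%26tenant_id%3D1234'
api.parse_auth_token(token)
# -> {'region': 'us-west', 'tenant_id': '1234'}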
<SYSTEM_TASK:>
Return region from a successful login response.
<END_TASK>
<USER_TASK:>
Description:
def parse_region(self, login_response):
"""
Return region from a successful login response.
**Parameters:**
- **login_response:** requests.Response from a successful login.
**Returns:** region name.
""" |
auth_token = login_response.cgx_content['x_auth_token']
auth_token_dict = self.parse_auth_token(auth_token)
auth_region = auth_token_dict.get('region')
return auth_region |