body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (1 class: python) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars) |
---|---|---|---|---|---|---|---|---|---|
bba7261a8e75fd0c281606b458cc35aedc3175b4fe2ddba16282c47a7810a627
|
def __setstate__(self, d):
'\n Sets the object to have the state described by `d`.\n\n Parameters\n ----------\n d : dict\n A dictionary mapping string names of fields to values for\n these fields.\n '
self.__dict__.update(d)
if ('batch_record' not in d):
self.batch_record = ([None] * len(self.val_record))
if ('epoch_record' not in d):
self.epoch_record = range(len(self.val_record))
if ('time_record' not in d):
self.time_record = ([None] * len(self.val_record))
|
Sets the object to have the state described by `d`.
Parameters
----------
d : dict
A dictionary mapping string names of fields to values for
these fields.
|
pylearn2/monitor.py
|
__setstate__
|
fxyu/pylearn2
| 2,045 |
python
|
def __setstate__(self, d):
'\n Sets the object to have the state described by `d`.\n\n Parameters\n ----------\n d : dict\n A dictionary mapping string names of fields to values for\n these fields.\n '
self.__dict__.update(d)
if ('batch_record' not in d):
self.batch_record = ([None] * len(self.val_record))
if ('epoch_record' not in d):
self.epoch_record = range(len(self.val_record))
if ('time_record' not in d):
self.time_record = ([None] * len(self.val_record))
|
def __setstate__(self, d):
'\n Sets the object to have the state described by `d`.\n\n Parameters\n ----------\n d : dict\n A dictionary mapping string names of fields to values for\n these fields.\n '
self.__dict__.update(d)
if ('batch_record' not in d):
self.batch_record = ([None] * len(self.val_record))
if ('epoch_record' not in d):
self.epoch_record = range(len(self.val_record))
if ('time_record' not in d):
self.time_record = ([None] * len(self.val_record))<|docstring|>Sets the object to have the state described by `d`.
Parameters
----------
d : dict
A dictionary mapping string names of fields to values for
these fields.<|endoftext|>
|
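The `__setstate__` in the row above backfills attributes that older pickles lack. A minimal, self-contained sketch of the same pattern (hypothetical `Channel` class, not pylearn2's):

```python
# Hypothetical class showing why __setstate__ backfills missing fields:
# objects pickled before an attribute existed still load under newer code.
import pickle

class Channel:
    def __init__(self):
        self.val_record = [0.1, 0.2, 0.3]   # only field in the "old" version

    def __setstate__(self, d):
        self.__dict__.update(d)
        if 'batch_record' not in d:          # attribute added in a "newer" version
            self.batch_record = [None] * len(self.val_record)

restored = pickle.loads(pickle.dumps(Channel()))
print(restored.batch_record)                 # [None, None, None]
```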
894d2fe1f09b85e545a1d29c43cfad616558a181b8768213a24d699904131d42
|
def start_daemon(self, force_start=False):
" Start the logging daemon, if it is not already running.\n\n The logging daemon uses the values found in config.ini in the working directory.\n Calls are made to the CMC API at the configuration interval set with the requried parameters.\n\n Parameters:\n force_start: Start the daemon even if it is already running. Warning, this could result in rate limiting.\n\n Return:\n This call doesn't return\n "
if (daemon_already_running() and (not force_start)):
return
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_successful_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_failed_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_success_rate, 100.0)
self.__create_API_and_publisher()
self.__daemon()
|
Start the logging daemon, if it is not already running.
The logging daemon uses the values found in config.ini in the working directory.
Calls are made to the CMC API at the configured interval with the required parameters.
Parameters:
force_start: Start the daemon even if it is already running. Warning, this could result in rate limiting.
Return:
This call doesn't return
|
cmclogger/cmclogger.py
|
start_daemon
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def start_daemon(self, force_start=False):
" Start the logging daemon, if it is not already running.\n\n The logging daemon uses the values found in config.ini in the working directory.\n Calls are made to the CMC API at the configuration interval set with the requried parameters.\n\n Parameters:\n force_start: Start the daemon even if it is already running. Warning, this could result in rate limiting.\n\n Return:\n This call doesn't return\n "
if (daemon_already_running() and (not force_start)):
return
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_successful_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_failed_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_success_rate, 100.0)
self.__create_API_and_publisher()
self.__daemon()
|
def start_daemon(self, force_start=False):
" Start the logging daemon, if it is not already running.\n\n The logging daemon uses the values found in config.ini in the working directory.\n Calls are made to the CMC API at the configuration interval set with the requried parameters.\n\n Parameters:\n force_start: Start the daemon even if it is already running. Warning, this could result in rate limiting.\n\n Return:\n This call doesn't return\n "
if (daemon_already_running() and (not force_start)):
return
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_successful_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_failed_calls, 0)
self.__status.set_value(settings.status_file_current_session_section_name, settings.status_file_option_success_rate, 100.0)
self.__create_API_and_publisher()
self.__daemon()<|docstring|>Start the logging daemon, if it is not already running.
The logging daemon uses the values found in config.ini in the working directory.
Calls are made to the CMC API at the configuration interval set with the requried parameters.
Parameters:
force_start: Start the daemon even if it is already running. Warning, this could result in rate limiting.
Return:
This call doesn't return<|endoftext|>
|
5ecb28bbd41f07a618ea857f8e1bf06f14f8255490e20025acc73ca364932e68
|
def fetch_and_store_data(self, api_key=None):
' Make an CMC API call and store the result in the database / status file\n\n The configuration will be sourced from config.ini in the working directory used\n when the object was created.\n\n Parameters:\n api_key: Optionally make this API call with a different key as specified.\n\n Returns:\n (respnse_status, latest_data, latest_status) (tuple)\n\n response_status (bool): True if the API call and storage completed successfully.\n latest_data (dict): Latest good data returned from coin market cap API. This data is\n guarenteed to be from the latest call only if response_status is True.\n status (dict): Dictionary containing the status of the last call.\n '
self.__create_API_and_publisher()
goodResponse = self.__api.getLatest(api_key)
self.__publisher.writeStatus(self.__api.getLatestStatus())
if (goodResponse is True):
self.__publisher.writeData(self.__api.getLatestData())
return (goodResponse, self.__api.getLatestData(), self.__api.getLatestStatus())
|
Make a CMC API call and store the result in the database / status file
The configuration will be sourced from config.ini in the working directory used
when the object was created.
Parameters:
api_key: Optionally make this API call with a different key as specified.
Returns:
(response_status, latest_data, latest_status) (tuple)
response_status (bool): True if the API call and storage completed successfully.
latest_data (dict): Latest good data returned from the CoinMarketCap API. This data is
guaranteed to be from the latest call only if response_status is True.
status (dict): Dictionary containing the status of the last call.
|
cmclogger/cmclogger.py
|
fetch_and_store_data
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def fetch_and_store_data(self, api_key=None):
' Make an CMC API call and store the result in the database / status file\n\n The configuration will be sourced from config.ini in the working directory used\n when the object was created.\n\n Parameters:\n api_key: Optionally make this API call with a different key as specified.\n\n Returns:\n (respnse_status, latest_data, latest_status) (tuple)\n\n response_status (bool): True if the API call and storage completed successfully.\n latest_data (dict): Latest good data returned from coin market cap API. This data is\n guarenteed to be from the latest call only if response_status is True.\n status (dict): Dictionary containing the status of the last call.\n '
self.__create_API_and_publisher()
goodResponse = self.__api.getLatest(api_key)
self.__publisher.writeStatus(self.__api.getLatestStatus())
if (goodResponse is True):
self.__publisher.writeData(self.__api.getLatestData())
return (goodResponse, self.__api.getLatestData(), self.__api.getLatestStatus())
|
def fetch_and_store_data(self, api_key=None):
' Make an CMC API call and store the result in the database / status file\n\n The configuration will be sourced from config.ini in the working directory used\n when the object was created.\n\n Parameters:\n api_key: Optionally make this API call with a different key as specified.\n\n Returns:\n (respnse_status, latest_data, latest_status) (tuple)\n\n response_status (bool): True if the API call and storage completed successfully.\n latest_data (dict): Latest good data returned from coin market cap API. This data is\n guarenteed to be from the latest call only if response_status is True.\n status (dict): Dictionary containing the status of the last call.\n '
self.__create_API_and_publisher()
goodResponse = self.__api.getLatest(api_key)
self.__publisher.writeStatus(self.__api.getLatestStatus())
if (goodResponse is True):
self.__publisher.writeData(self.__api.getLatestData())
return (goodResponse, self.__api.getLatestData(), self.__api.getLatestStatus())<|docstring|>Make an CMC API call and store the result in the database / status file
The configuration will be sourced from config.ini in the working directory used
when the object was created.
Parameters:
api_key: Optionally make this API call with a different key as specified.
Returns:
(respnse_status, latest_data, latest_status) (tuple)
response_status (bool): True if the API call and storage completed successfully.
latest_data (dict): Latest good data returned from coin market cap API. This data is
guarenteed to be from the latest call only if response_status is True.
status (dict): Dictionary containing the status of the last call.<|endoftext|>
|
f86b9567fff62a364c799e346bd2a770cfb54638aaf042c9b4a1faea4f52cdf1
|
def write_custom_status(self, status):
' Enter a custom status into the results API\n\n Parameters:\n status (dict): A correctly structured dictionary can be obtained from the\n returned status of fetch_and_store_data\n '
self.__create_API_and_publisher()
self.__publisher.writeStatus(status)
|
Enter a custom status into the results API
Parameters:
status (dict): A correctly structured dictionary can be obtained from the
returned status of fetch_and_store_data
|
cmclogger/cmclogger.py
|
write_custom_status
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def write_custom_status(self, status):
' Enter a custom status into the results API\n\n Parameters:\n status (dict): A correctly structured dictionary can be obtained from the\n returned status of fetch_and_store_data\n '
self.__create_API_and_publisher()
self.__publisher.writeStatus(status)
|
def write_custom_status(self, status):
' Enter a custom status into the results API\n\n Parameters:\n status (dict): A correctly structured dictionary can be obtained from the\n returned status of fetch_and_store_data\n '
self.__create_API_and_publisher()
self.__publisher.writeStatus(status)<|docstring|>Enter a custom status into the results API
Parameters:
status (dict): A correctly structured dictionary can be obtained from the
returned status of fetch_and_store_data<|endoftext|>
|
7a98a03271e737185d8ca1461e9b5af94ef35982bd10fda895b3a1a8b383e133
|
def data_request(self, request):
' Request data from the database\n\n Parameters:\n request (dict): {"query_type", "query_tag", "output_format", "output_detail"}\n '
reader = Reader(self.__status, self.__database, self.__config)
print(reader.processRequest(request))
|
Request data from the database
Parameters:
request (dict): {"query_type", "query_tag", "output_format", "output_detail"}
|
cmclogger/cmclogger.py
|
data_request
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def data_request(self, request):
' Request data from the database\n\n Parameters:\n request (dict): {"query_type", "query_tag", "output_format", "output_detail"}\n '
reader = Reader(self.__status, self.__database, self.__config)
print(reader.processRequest(request))
|
def data_request(self, request):
' Request data from the database\n\n Parameters:\n request (dict): {"query_type", "query_tag", "output_format", "output_detail"}\n '
reader = Reader(self.__status, self.__database, self.__config)
print(reader.processRequest(request))<|docstring|>Request data from the database
Parameters:
request (dict): {"query_type", "query_tag", "output_format", "output_detail"}<|endoftext|>
|
5cf9b2a39d1cc1b94a28aac48993428ae745d277bda6632428b0068ce80529e8
|
def get_database(self):
' Get the dbops.SQHelper data base object being used.'
return self.__database
|
Get the dbops.SQHelper database object being used.
|
cmclogger/cmclogger.py
|
get_database
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def get_database(self):
' '
return self.__database
|
def get_database(self):
' '
return self.__database<|docstring|>Get the dbops.SQHelper data base object being used.<|endoftext|>
|
e9273fe91029fbc0b5c743c049e4ec4bfcdf01e9dadc15a557f1570ec9d52982
|
def get_status_file(self):
' Get the configchecker.ConfigChecker status file object being used.'
return self.__status
|
Get the configchecker.ConfigChecker status file object being used.
|
cmclogger/cmclogger.py
|
get_status_file
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def get_status_file(self):
' '
return self.__status
|
def get_status_file(self):
' '
return self.__status<|docstring|>Get the configchecker.ConfigChecker status file object being used.<|endoftext|>
|
251361ec23afed8cf5d2f16fb883338578de2879d5ea0005d2f907aadbaf1c46
|
def get_config_file(self):
' Get the configchecker.ConfigChecker config file object being used.'
return self.__config
|
Get the configchecker.ConfigChecker config file object being used.
|
cmclogger/cmclogger.py
|
get_config_file
|
Abhisheknishant/CMCLogger
| 0 |
python
|
def get_config_file(self):
' '
return self.__config
|
def get_config_file(self):
' '
return self.__config<|docstring|>Get the configchecker.ConfigChecker config file object being used.<|endoftext|>
|
861a9e83ea6c27d6d4043a495f26d0ec0272cb7b478dcc18b860fe46272a4f9c
|
def get_relative_path(start_path, path):
' 返回相对路径 '
return os.path.relpath(path, start_path)
|
Return the relative path.
|
ezfile/__init__.py
|
get_relative_path
|
menzi11/EZFile
| 1 |
python
|
def get_relative_path(start_path, path):
' '
return os.path.relpath(path, start_path)
|
def get_relative_path(start_path, path):
' '
return os.path.relpath(path, start_path)<|docstring|>返回相对路径<|endoftext|>
|
1d633960c58af207036cfb288073825423ca879881d26065a895ce6b1a0d9ddb
|
def exists(path):
'tell if a path exists'
return os.path.exists(path)
|
tell if a path exists
|
ezfile/__init__.py
|
exists
|
menzi11/EZFile
| 1 |
python
|
def exists(path):
return os.path.exists(path)
|
def exists(path):
return os.path.exists(path)<|docstring|>tell if a path exists<|endoftext|>
|
cd2da9b9366633e6f4827e9889ace8d1403726d4d0fe62a85f07ffb448a89549
|
def exists_as_dir(path):
' check if a path is a dir and it is exists'
return (exists(path) and os.path.isdir(path))
|
check if a path is a dir and exists
|
ezfile/__init__.py
|
exists_as_dir
|
menzi11/EZFile
| 1 |
python
|
def exists_as_dir(path):
' '
return (exists(path) and os.path.isdir(path))
|
def exists_as_dir(path):
' '
return (exists(path) and os.path.isdir(path))<|docstring|>check if a path is a dir and it is exists<|endoftext|>
|
63c9552302ad380125589a2b72c9ef90d61d898ac8c0c9e4efce575fe735aa02
|
def exists_as_file(path):
' check if a path is a file and it is exists '
return (exists(path) and os.path.isfile(path))
|
check if a path is a file and exists
|
ezfile/__init__.py
|
exists_as_file
|
menzi11/EZFile
| 1 |
python
|
def exists_as_file(path):
' '
return (exists(path) and os.path.isfile(path))
|
def exists_as_file(path):
' '
return (exists(path) and os.path.isfile(path))<|docstring|>check if a path is a file and it is exists<|endoftext|>
|
79c02f176d3ea9ce5d2582982e85a7aa434fc4c546fb19723810ea5df1a0dd02
|
def get_full_path_with_ext(path):
' return full path of a file(abspath)'
return os.path.abspath(path)
|
return full path of a file(abspath)
|
ezfile/__init__.py
|
get_full_path_with_ext
|
menzi11/EZFile
| 1 |
python
|
def get_full_path_with_ext(path):
' '
return os.path.abspath(path)
|
def get_full_path_with_ext(path):
' '
return os.path.abspath(path)<|docstring|>return full path of a file(abspath)<|endoftext|>
|
2b0396b5745fc5780353539520090cb8c1bce81f715f95c7d3443c490619b026
|
def get_full_path_without_ext(path):
' return full path of a file without ext '
return get_sibling_file(path, get_short_name_without_ext(path))
|
return full path of a file without ext
|
ezfile/__init__.py
|
get_full_path_without_ext
|
menzi11/EZFile
| 1 |
python
|
def get_full_path_without_ext(path):
' '
return get_sibling_file(path, get_short_name_without_ext(path))
|
def get_full_path_without_ext(path):
' '
return get_sibling_file(path, get_short_name_without_ext(path))<|docstring|>return full path of a file without ext<|endoftext|>
|
2bea3fdacc1cadb81299f51310c592b774d9ea009ef1dedd1efbdea1d4d64472
|
def get_ext(path):
' get file ext '
return os.path.splitext(get_short_name_with_ext(path))[1]
|
get file ext
|
ezfile/__init__.py
|
get_ext
|
menzi11/EZFile
| 1 |
python
|
def get_ext(path):
' '
return os.path.splitext(get_short_name_with_ext(path))[1]
|
def get_ext(path):
' '
return os.path.splitext(get_short_name_with_ext(path))[1]<|docstring|>get file ext<|endoftext|>
|
b56cd973e73f8d09ac442aa2aafe6448a55fcdec59bedae15305aaac4963c683
|
def get_short_name_without_ext(path):
' get file short name without ext, for example: "c:/1.txt" will return "1" '
return os.path.splitext(get_short_name_with_ext(path))[0]
|
get file short name without ext, for example: "c:/1.txt" will return "1"
|
ezfile/__init__.py
|
get_short_name_without_ext
|
menzi11/EZFile
| 1 |
python
|
def get_short_name_without_ext(path):
' '
return os.path.splitext(get_short_name_with_ext(path))[0]
|
def get_short_name_without_ext(path):
' '
return os.path.splitext(get_short_name_with_ext(path))[0]<|docstring|>get file short name without ext, for example: "c:/1.txt" will return "1"<|endoftext|>
|
6420efe8fd9b192481de0ac9d4cab4e2316273673db754a4b21ae3925432aafb
|
def get_short_name_with_ext(path):
' get file short name without ext, for example: "c:/1.txt" will return "1.txt" '
return os.path.basename(path)
|
get file short name with ext, for example: "c:/1.txt" will return "1.txt"
|
ezfile/__init__.py
|
get_short_name_with_ext
|
menzi11/EZFile
| 1 |
python
|
def get_short_name_with_ext(path):
' '
return os.path.basename(path)
|
def get_short_name_with_ext(path):
' '
return os.path.basename(path)<|docstring|>get file short name without ext, for example: "c:/1.txt" will return "1.txt"<|endoftext|>
|
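The EZFile name helpers in the rows above are thin wrappers over `os.path`; a quick stdlib demonstration of what each underlying call returns:

```python
import os

path = '/home/user/archive/report.tar.gz'
print(os.path.basename(path))                        # report.tar.gz  (short name with ext)
print(os.path.splitext(os.path.basename(path))[0])   # report.tar     (short name without ext)
print(os.path.splitext(os.path.basename(path))[1])   # .gz            (ext)
print(os.path.relpath(path, '/home/user'))           # archive/report.tar.gz (on POSIX)
```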
9f96e2e13f87c88f11d5988bbc7bd63a7f0879ac0180d861138460bf20b4234f
|
def get_child_file(path, child_name):
' get child file of a path( no matter if the child file exists )\n for example, "get_child_file(\'c:/\',\'1.txt\')" will return "c:/1.txt" '
return os.path.join(path, child_name)
|
get child file of a path( no matter if the child file exists )
for example, "get_child_file('c:/','1.txt')" will return "c:/1.txt"
|
ezfile/__init__.py
|
get_child_file
|
menzi11/EZFile
| 1 |
python
|
def get_child_file(path, child_name):
' get child file of a path( no matter if the child file exists )\n for example, "get_child_file(\'c:/\',\'1.txt\')" will return "c:/1.txt" '
return os.path.join(path, child_name)
|
def get_child_file(path, child_name):
' get child file of a path( no matter if the child file exists )\n for example, "get_child_file(\'c:/\',\'1.txt\')" will return "c:/1.txt" '
return os.path.join(path, child_name)<|docstring|>get child file of a path( no matter if the child file exists )
for example, "get_child_file('c:/','1.txt')" will return "c:/1.txt"<|endoftext|>
|
124306baaee1b2b7123a2304b4cd630721f2420ecf0f47a05eb6d9617adc412a
|
def get_sibling_file(path, siblingFileName):
" get sibling file of a path. for example, get_sibling_file('c:/1.txt','2.txt') will return 'c:/2.txt' "
return get_parent_dir(path).get_child_file(siblingFileName)
|
get sibling file of a path. for example, get_sibling_file('c:/1.txt','2.txt') will return 'c:/2.txt'
|
ezfile/__init__.py
|
get_sibling_file
|
menzi11/EZFile
| 1 |
python
|
def get_sibling_file(path, siblingFileName):
" "
return get_parent_dir(path).get_child_file(siblingFileName)
|
def get_sibling_file(path, siblingFileName):
" "
return get_parent_dir(path).get_child_file(siblingFileName)<|docstring|>get sibling file of a path. for example, get_sibling_file('c:/1.txt','2.txt') will return 'c:/2.txt'<|endoftext|>
|
95745f5346ef3752934535f5bf6182880bca14a6d0b71f8ed75d020c717e76f2
|
def get_parent_dir(path):
" get parent dir, get_parant_dir('c:/1.txt') will return 'c:/' "
return os.path.abspath(os.path.join(path, '..'))
|
get parent dir, get_parent_dir('c:/1.txt') will return 'c:/'
|
ezfile/__init__.py
|
get_parent_dir
|
menzi11/EZFile
| 1 |
python
|
def get_parent_dir(path):
" "
return os.path.abspath(os.path.join(path, '..'))
|
def get_parent_dir(path):
" "
return os.path.abspath(os.path.join(path, '..'))<|docstring|>get parent dir, get_parant_dir('c:/1.txt') will return 'c:/'<|endoftext|>
|
fd5a7617de03c7ff21bacdd594d40610e9c5df303f1b9af82791935c0047c3f1
|
def create_dir(path):
' create a dir. if the dir exists, than do nothing. '
if (not exists_as_dir(path)):
os.makedirs(path)
|
create a dir. if the dir exists, then do nothing.
|
ezfile/__init__.py
|
create_dir
|
menzi11/EZFile
| 1 |
python
|
def create_dir(path):
' '
if (not exists_as_dir(path)):
os.makedirs(path)
|
def create_dir(path):
' '
if (not exists_as_dir(path)):
os.makedirs(path)<|docstring|>create a dir. if the dir exists, than do nothing.<|endoftext|>
|
b2e2f8185ce378d7fa9098c8a891c4bbf34b2b775b51332309f719ad31a5ed5b
|
def with_new_ext(path, newExt):
' change files ext, if path is a dir, than do nothing. '
if (get_ext(path) == ''):
return
if ('.' not in newExt[0]):
newExt = ('.' + newExt)
path = (get_full_path_without_ext(path) + newExt)
return path
|
change a file's ext; if path is a dir, do nothing.
|
ezfile/__init__.py
|
with_new_ext
|
menzi11/EZFile
| 1 |
python
|
def with_new_ext(path, newExt):
' '
if (get_ext(path) == ''):
return
if ('.' not in newExt[0]):
newExt = ('.' + newExt)
path = (get_full_path_without_ext(path) + newExt)
return path
|
def with_new_ext(path, newExt):
' '
if (get_ext(path) == ''):
return
if ('.' not in newExt[0]):
newExt = ('.' + newExt)
path = (get_full_path_without_ext(path) + newExt)
return path<|docstring|>change files ext, if path is a dir, than do nothing.<|endoftext|>
|
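A stdlib-only sketch of the same extension-swap idea (a re-implementation, not the EZFile code itself; the original returns None for directories, this sketch returns the path unchanged):

```python
import os

def with_new_ext(path, new_ext):
    root, ext = os.path.splitext(path)
    if ext == '':                        # no extension: treat as a dir, leave unchanged
        return path
    if not new_ext.startswith('.'):
        new_ext = '.' + new_ext
    return root + new_ext

print(with_new_ext('/tmp/report.txt', 'md'))   # /tmp/report.md
print(with_new_ext('/tmp/somedir', 'md'))      # /tmp/somedir
```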
ceb16459b6068ff7d2ef4cfb83561004b3b7e225b2d1c2121e877759a29cf114
|
def move_to(path, target):
'将文件夹或文件移动到新的位置,如果新的位置已经存在,则返回False'
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True
|
Move a folder or file to a new location; if the new location already exists, return False.
|
ezfile/__init__.py
|
move_to
|
menzi11/EZFile
| 1 |
python
|
def move_to(path, target):
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True
|
def move_to(path, target):
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.move(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True<|docstring|>将文件夹或文件移动到新的位置,如果新的位置已经存在,则返回False<|endoftext|>
|
0c43324bcfcbad730fae27eab562c208309d273820ae97608fa39e702850ca4e
|
def remove(path):
'删除文件或文件夹,不经过回收站'
if exists_as_dir(path):
shutil.rmtree(get_full_path_with_ext(path))
elif exists_as_file(path):
os.remove(get_full_path_with_ext(path))
|
Delete a file or folder, bypassing the recycle bin.
|
ezfile/__init__.py
|
remove
|
menzi11/EZFile
| 1 |
python
|
def remove(path):
if exists_as_dir(path):
shutil.rmtree(get_full_path_with_ext(path))
elif exists_as_file(path):
os.remove(get_full_path_with_ext(path))
|
def remove(path):
if exists_as_dir(path):
shutil.rmtree(get_full_path_with_ext(path))
elif exists_as_file(path):
os.remove(get_full_path_with_ext(path))<|docstring|>删除文件或文件夹,不经过回收站<|endoftext|>
|
6ba7fad4ba176256e3e7c6a293d1a9cc620e1686f0e44dbc786b4fead454bcf4
|
def copy_to(path, target, replaceIfTargetExist=False):
'将文件拷贝到target中,target可为一个ezfile或者str. 若target已经存在,则根据replaceIfTargetExist选项来决定是否覆盖新文件. 返回是否复制成功.'
if (exists_as_file(target) and (not replaceIfTargetExist)):
return False
if exists_as_file(target):
remove(target)
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.copy2(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.copytree(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True
|
Copy the file into target, which can be an ezfile or a str. If target already exists, the replaceIfTargetExist option decides whether it is overwritten. Returns whether the copy succeeded.
|
ezfile/__init__.py
|
copy_to
|
menzi11/EZFile
| 1 |
python
|
def copy_to(path, target, replaceIfTargetExist=False):
if (exists_as_file(target) and (not replaceIfTargetExist)):
return False
if exists_as_file(target):
remove(target)
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.copy2(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.copytree(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True
|
def copy_to(path, target, replaceIfTargetExist=False):
if (exists_as_file(target) and (not replaceIfTargetExist)):
return False
if exists_as_file(target):
remove(target)
if (exists_as_file(path) and (not exists_as_file(target))):
create_dir(get_parent_dir(target))
shutil.copy2(get_full_path_with_ext(path), get_full_path_with_ext(target))
elif (exists_as_dir(path) and (not exists_as_file(target))):
shutil.copytree(get_full_path_with_ext(path), get_full_path_with_ext(target))
return True<|docstring|>将文件拷贝到target中,target可为一个ezfile或者str. 若target已经存在,则根据replaceIfTargetExist选项来决定是否覆盖新文件. 返回是否复制成功.<|endoftext|>
|
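A stdlib sketch of the copy-with-overwrite-guard behaviour described above, run inside a temporary directory so it is safe to execute (simplified to files only, no directory copy):

```python
import os
import shutil
import tempfile

def copy_to(src, dst, replace_if_target_exists=False):
    if os.path.isfile(dst) and not replace_if_target_exists:
        return False                                  # refuse to overwrite
    os.makedirs(os.path.dirname(dst), exist_ok=True)  # ensure the parent dir exists
    shutil.copy2(src, dst)                            # copy data and metadata
    return True

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, 'a.txt')
    with open(src, 'w') as f:
        f.write('hello')
    dst = os.path.join(tmp, 'sub', 'b.txt')
    print(copy_to(src, dst))         # True  (copied, parent created)
    print(copy_to(src, dst))         # False (target exists, replace not set)
    print(copy_to(src, dst, True))   # True  (overwritten)
```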
1da0ca9a5f9a94a27736d32204c08dbd7f479eb5d73857df3986af8dec13f4bf
|
def rename(path, newname, use_relax_filename=True, include_ext=False):
" rename a file. if 'use_relax_filename' enabled, than unsupported char will remove auto. "
t = ['?', '*', '/', '\\', '<', '>', ':', '"', '|']
for r in t:
if ((not use_relax_filename) and (r in newname)):
return False
newname = newname.replace(r, '')
X = os.path.join(get_parent_dir(path), newname)
if exists(path):
if include_ext:
X = (X + get_ext(path))
shutil.move(get_full_path_with_ext(path), X)
path = X
return True
|
rename a file. if 'use_relax_filename' is enabled, unsupported characters are removed automatically.
|
ezfile/__init__.py
|
rename
|
menzi11/EZFile
| 1 |
python
|
def rename(path, newname, use_relax_filename=True, include_ext=False):
" "
t = ['?', '*', '/', '\\', '<', '>', ':', '"', '|']
for r in t:
if ((not use_relax_filename) and (r in newname)):
return False
newname = newname.replace(r, '')
X = os.path.join(get_parent_dir(path), newname)
if exists(path):
if include_ext:
X = (X + get_ext(path))
shutil.move(get_full_path_with_ext(path), X)
path = X
return True
|
def rename(path, newname, use_relax_filename=True, include_ext=False):
" "
t = ['?', '*', '/', '\\', '<', '>', ':', '"', '|']
for r in t:
if ((not use_relax_filename) and (r in newname)):
return False
newname = newname.replace(r, '')
X = os.path.join(get_parent_dir(path), newname)
if exists(path):
if include_ext:
X = (X + get_ext(path))
shutil.move(get_full_path_with_ext(path), X)
path = X
return True<|docstring|>rename a file. if 'use_relax_filename' enabled, than unsupported char will remove auto.<|endoftext|>
|
43998d674ff5524cf8ebd30f40d33d1280d3282360d007e80134bff67e958f62
|
def detect_text_coding(path):
'以文本形式打开当前文件并猜测其字符集编码'
f = open(get_full_path_with_ext(path), 'rb')
tt = f.read(200)
f.close()
result = chardet.detect(tt)
return result['encoding']
|
Open the file as text and guess its character encoding.
|
ezfile/__init__.py
|
detect_text_coding
|
menzi11/EZFile
| 1 |
python
|
def detect_text_coding(path):
f = open(get_full_path_with_ext(path), 'rb')
tt = f.read(200)
f.close()
result = chardet.detect(tt)
return result['encoding']
|
def detect_text_coding(path):
f = open(get_full_path_with_ext(path), 'rb')
tt = f.read(200)
f.close()
result = chardet.detect(tt)
return result['encoding']<|docstring|>以文本形式打开当前文件并猜测其字符集编码<|endoftext|>
|
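The encoding sniff above relies on the third-party `chardet` package; a minimal runnable sketch of the same call:

```python
import chardet  # third-party: pip install chardet

sample = '编码测试 encoding test'.encode('gbk')
result = chardet.detect(sample[:200])   # same "first 200 bytes" idea as the row above
print(result['encoding'], result['confidence'])
```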
38f998dde78f8efbb2523b9513a7e75a248e7c434b1f928ab99c4bdec1f1c9e5
|
def find_child_files(path, searchRecursively=False, wildCardPattern='.'):
'在当前目录中查找文件,若选择searchRecursively则代表着搜索包含子目录, wildCardPattern意思是只搜索扩展名为".xxx"的文件,也可留空代表搜索全部文件. '
all_search_list = ['.', '.*', '*', '']
tmp = list()
if (not exists_as_dir(path)):
return tmp
for (fpath, _, fnames) in os.walk(get_full_path_with_ext(path)):
if ((fpath is not get_full_path_with_ext(path)) and (not searchRecursively)):
break
for filename in fnames:
if (wildCardPattern in all_search_list):
pass
else:
if (wildCardPattern[0] != '.'):
wildCardPattern = ('.' + wildCardPattern)
if ((not filename.endswith(wildCardPattern)) and (wildCardPattern is not '.')):
continue
tmp.append(os.path.join(fpath, filename))
return tmp
|
Search for files in the given directory; if searchRecursively is selected, subdirectories are searched as well. wildCardPattern means only files with extension ".xxx" are searched; it can also be left empty to search all files.
|
ezfile/__init__.py
|
find_child_files
|
menzi11/EZFile
| 1 |
python
|
def find_child_files(path, searchRecursively=False, wildCardPattern='.'):
' '
all_search_list = ['.', '.*', '*', '']
tmp = list()
if (not exists_as_dir(path)):
return tmp
for (fpath, _, fnames) in os.walk(get_full_path_with_ext(path)):
if ((fpath is not get_full_path_with_ext(path)) and (not searchRecursively)):
break
for filename in fnames:
if (wildCardPattern in all_search_list):
pass
else:
if (wildCardPattern[0] != '.'):
wildCardPattern = ('.' + wildCardPattern)
if ((not filename.endswith(wildCardPattern)) and (wildCardPattern is not '.')):
continue
tmp.append(os.path.join(fpath, filename))
return tmp
|
def find_child_files(path, searchRecursively=False, wildCardPattern='.'):
' '
all_search_list = ['.', '.*', '*', '']
tmp = list()
if (not exists_as_dir(path)):
return tmp
for (fpath, _, fnames) in os.walk(get_full_path_with_ext(path)):
if ((fpath is not get_full_path_with_ext(path)) and (not searchRecursively)):
break
for filename in fnames:
if (wildCardPattern in all_search_list):
pass
else:
if (wildCardPattern[0] != '.'):
wildCardPattern = ('.' + wildCardPattern)
if ((not filename.endswith(wildCardPattern)) and (wildCardPattern is not '.')):
continue
tmp.append(os.path.join(fpath, filename))
return tmp<|docstring|>在当前目录中查找文件,若选择searchRecursively则代表着搜索包含子目录, wildCardPattern意思是只搜索扩展名为".xxx"的文件,也可留空代表搜索全部文件.<|endoftext|>
|
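A stdlib-only sketch of the directory search described above (optionally recursive, optionally filtered by extension); the names here are mine, not EZFile's:

```python
import os

def find_files(root, recursive=False, ext=''):
    if ext and not ext.startswith('.'):
        ext = '.' + ext
    matches = []
    for dirpath, _, filenames in os.walk(root):   # top-down: root comes first
        matches.extend(os.path.join(dirpath, n) for n in filenames
                       if not ext or n.endswith(ext))
        if not recursive:
            break                                  # keep only the top level
    return matches

print(find_files('.', recursive=True, ext='py'))
```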
ce48fd56d407bedb41734f2af6a151618dc25a8d9e36f2514523f5249cb132e5
|
def perturb_graphs(graphs, perturbation_type, random_seed=42, n_repetitions=1, **kwargs):
'\n Take a list of networkx graphs as input and perturb them according\n to a specified perturbation and degree of perturbation. \n\n Parameters\n ----------\n graphs : a list of networkx graphs\n perturbation_type: one of [AddEdges, RemoveEdges, RewireEdges, AddConnectedNodes]\n random_seed: the desired random seed\n **kwargs: any perturbation parameter required for the chosen perturbation\n\n Returns\n -------\n perturbation_values: values of perturbation levels\n perturbed_graphs : a list of perturbed networkx graphs\n '
random_state = np.random.RandomState(random_seed)
parameters_for_perturbation = {}
parameter_values = list(np.arange(0.15, 1.0, 0.05))
parameters = 'p_add'
print(parameter_values)
perturbation_class = getattr(perturbations, perturbation_type)
perturbed_graphs = []
perturbation_parameters = []
parameter_dict = {parameters: parameter_values}
for i in parameter_values:
perturbation_dict = {parameters: [i]}
perturbation_parameters.append(perturbation_dict)
cur_perturbation = []
perturbation = perturbation_class(random_state=random_state, **perturbation_dict)
def perturb_and_convert(graph):
return nx.to_scipy_sparse_matrix(perturbation(graph))
for _ in range(n_repetitions):
cur_perturbation.extend(map(perturb_and_convert, graphs))
perturbed_graphs.append(cur_perturbation)
return (parameter_values, perturbed_graphs)
|
Take a list of networkx graphs as input and perturb them according
to a specified perturbation and degree of perturbation.
Parameters
----------
graphs : a list of networkx graphs
perturbation_type: one of [AddEdges, RemoveEdges, RewireEdges, AddConnectedNodes]
random_seed: the desired random seed
**kwargs: any perturbation parameter required for the chosen perturbation
Returns
-------
perturbation_values: values of perturbation levels
perturbed_graphs : a list of perturbed networkx graphs
|
src/perturb_graphs.py
|
perturb_graphs
|
BorgwardtLab/ggme
| 6 |
python
|
def perturb_graphs(graphs, perturbation_type, random_seed=42, n_repetitions=1, **kwargs):
'\n Take a list of networkx graphs as input and perturb them according\n to a specified perturbation and degree of perturbation. \n\n Parameters\n ----------\n graphs : a list of networkx graphs\n perturbation_type: one of [AddEdges, RemoveEdges, RewireEdges, AddConnectedNodes]\n random_seed: the desired random seed\n **kwargs: any perturbation parameter required for the chosen perturbation\n\n Returns\n -------\n perturbation_values: values of perturbation levels\n perturbed_graphs : a list of perturbed networkx graphs\n '
random_state = np.random.RandomState(random_seed)
parameters_for_perturbation = {}
parameter_values = list(np.arange(0.15, 1.0, 0.05))
parameters = 'p_add'
print(parameter_values)
perturbation_class = getattr(perturbations, perturbation_type)
perturbed_graphs = []
perturbation_parameters = []
parameter_dict = {parameters: parameter_values}
for i in parameter_values:
perturbation_dict = {parameters: [i]}
perturbation_parameters.append(perturbation_dict)
cur_perturbation = []
perturbation = perturbation_class(random_state=random_state, **perturbation_dict)
def perturb_and_convert(graph):
return nx.to_scipy_sparse_matrix(perturbation(graph))
for _ in range(n_repetitions):
cur_perturbation.extend(map(perturb_and_convert, graphs))
perturbed_graphs.append(cur_perturbation)
return (parameter_values, perturbed_graphs)
|
def perturb_graphs(graphs, perturbation_type, random_seed=42, n_repetitions=1, **kwargs):
'\n Take a list of networkx graphs as input and perturb them according\n to a specified perturbation and degree of perturbation. \n\n Parameters\n ----------\n graphs : a list of networkx graphs\n perturbation_type: one of [AddEdges, RemoveEdges, RewireEdges, AddConnectedNodes]\n random_seed: the desired random seed\n **kwargs: any perturbation parameter required for the chosen perturbation\n\n Returns\n -------\n perturbation_values: values of perturbation levels\n perturbed_graphs : a list of perturbed networkx graphs\n '
random_state = np.random.RandomState(random_seed)
parameters_for_perturbation = {}
parameter_values = list(np.arange(0.15, 1.0, 0.05))
parameters = 'p_add'
print(parameter_values)
perturbation_class = getattr(perturbations, perturbation_type)
perturbed_graphs = []
perturbation_parameters = []
parameter_dict = {parameters: parameter_values}
for i in parameter_values:
perturbation_dict = {parameters: [i]}
perturbation_parameters.append(perturbation_dict)
cur_perturbation = []
perturbation = perturbation_class(random_state=random_state, **perturbation_dict)
def perturb_and_convert(graph):
return nx.to_scipy_sparse_matrix(perturbation(graph))
for _ in range(n_repetitions):
cur_perturbation.extend(map(perturb_and_convert, graphs))
perturbed_graphs.append(cur_perturbation)
return (parameter_values, perturbed_graphs)<|docstring|>Take a list of networkx graphs as input and perturb them according
to a specified perturbation and degree of perturbation.
Parameters
----------
graphs : a list of networkx graphs
perturbation_type: one of [AddEdges, RemoveEdges, RewireEdges, AddConnectedNodes]
random_seed: the desired random seed
**kwargs: any perturbation parameter required for the chosen perturbation
Returns
-------
perturbation_values: values of perturbation levels
perturbed_graphs : a list of perturbed networkx graphs<|endoftext|>
|
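The row above depends on the repository's `perturbations` module. A self-contained sketch of one perturbation type (edge addition with probability `p_add`, a hypothetical stand-in for the `AddEdges` class) shows the shape of the loop:

```python
import networkx as nx
import numpy as np

def add_edges(graph, p_add, rng):
    """Add each currently absent edge with probability p_add."""
    g = graph.copy()
    for u, v in nx.non_edges(graph):
        if rng.rand() < p_add:
            g.add_edge(u, v)
    return g

rng = np.random.RandomState(42)
base = nx.erdos_renyi_graph(20, 0.1, seed=1)
parameter_values = list(np.arange(0.15, 1.0, 0.05))
perturbed = [add_edges(base, p, rng) for p in parameter_values]
print([g.number_of_edges() for g in perturbed])
```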
660e6ab118aff2f7c883c3a729a7148dc61b04a1c7cf1a6b88b2ff49a50a9b0e
|
def fast_update(self, objs: Iterable[Model], fields: Iterable[str], batch_size: Optional[int]=None) -> int:
'\n Faster alternative for ``bulk_update`` with the same method signature.\n\n Due to the way the update works internally with constant VALUES tables,\n f-expressions cannot be used anymore. Beside that it has similar\n restrictions as ``bulk_update`` (e.g. primary keys cannot be updated).\n\n The internal implementation relies on recent versions of database\n backends and will fall back to ``bulk_update`` if the backend is not\n supported. It will also invoke ``bulk_update`` for non-local fields\n (e.g. for multi-table inheritance).\n\n ``batch_size`` can be set to much higher values than typically\n for ``bulk_update`` (if needed at all).\n\n Returns the number of affected rows.\n '
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'fast_update()', batch_size)
return fast_update(self, objs, fields_, batch_size)
|
Faster alternative for ``bulk_update`` with the same method signature.
Due to the way the update works internally with constant VALUES tables,
f-expressions cannot be used anymore. Besides that, it has similar
restrictions as ``bulk_update`` (e.g. primary keys cannot be updated).
The internal implementation relies on recent versions of database
backends and will fall back to ``bulk_update`` if the backend is not
supported. It will also invoke ``bulk_update`` for non-local fields
(e.g. for multi-table inheritance).
``batch_size`` can be set to much higher values than typically
for ``bulk_update`` (if needed at all).
Returns the number of affected rows.
|
fast_update/query.py
|
fast_update
|
netzkolchose/django-fast-update
| 0 |
python
|
def fast_update(self, objs: Iterable[Model], fields: Iterable[str], batch_size: Optional[int]=None) -> int:
'\n Faster alternative for ``bulk_update`` with the same method signature.\n\n Due to the way the update works internally with constant VALUES tables,\n f-expressions cannot be used anymore. Beside that it has similar\n restrictions as ``bulk_update`` (e.g. primary keys cannot be updated).\n\n The internal implementation relies on recent versions of database\n backends and will fall back to ``bulk_update`` if the backend is not\n supported. It will also invoke ``bulk_update`` for non-local fields\n (e.g. for multi-table inheritance).\n\n ``batch_size`` can be set to much higher values than typically\n for ``bulk_update`` (if needed at all).\n\n Returns the number of affected rows.\n '
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'fast_update()', batch_size)
return fast_update(self, objs, fields_, batch_size)
|
def fast_update(self, objs: Iterable[Model], fields: Iterable[str], batch_size: Optional[int]=None) -> int:
'\n Faster alternative for ``bulk_update`` with the same method signature.\n\n Due to the way the update works internally with constant VALUES tables,\n f-expressions cannot be used anymore. Beside that it has similar\n restrictions as ``bulk_update`` (e.g. primary keys cannot be updated).\n\n The internal implementation relies on recent versions of database\n backends and will fall back to ``bulk_update`` if the backend is not\n supported. It will also invoke ``bulk_update`` for non-local fields\n (e.g. for multi-table inheritance).\n\n ``batch_size`` can be set to much higher values than typically\n for ``bulk_update`` (if needed at all).\n\n Returns the number of affected rows.\n '
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'fast_update()', batch_size)
return fast_update(self, objs, fields_, batch_size)<|docstring|>Faster alternative for ``bulk_update`` with the same method signature.
Due to the way the update works internally with constant VALUES tables,
f-expressions cannot be used anymore. Beside that it has similar
restrictions as ``bulk_update`` (e.g. primary keys cannot be updated).
The internal implementation relies on recent versions of database
backends and will fall back to ``bulk_update`` if the backend is not
supported. It will also invoke ``bulk_update`` for non-local fields
(e.g. for multi-table inheritance).
``batch_size`` can be set to much higher values than typically
for ``bulk_update`` (if needed at all).
Returns the number of affected rows.<|endoftext|>
|
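The docstring above mentions that the update is driven by a constant VALUES table. This is not django-fast-update's actual SQL, just a plain-sqlite3 sketch of the general idea (one statement updates many rows from inline values):

```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE item (id INTEGER PRIMARY KEY, price REAL)')
con.executemany('INSERT INTO item VALUES (?, ?)', [(1, 1.0), (2, 2.0), (3, 3.0)])

# A constant VALUES table drives the update of many rows in one statement.
con.execute("""
    WITH v(id, price) AS (VALUES (1, 9.5), (3, 7.25))
    UPDATE item
       SET price = (SELECT price FROM v WHERE v.id = item.id)
     WHERE id IN (SELECT id FROM v)
""")
print(con.execute('SELECT * FROM item ORDER BY id').fetchall())
# [(1, 9.5), (2, 2.0), (3, 7.25)]
```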
7067fdc1b567fda0bbde317dc435f67fd9f7bfa7b59093fff5aedbe135f5cdc8
|
def copy_update(self, objs: Iterable[Model], fields: Iterable[str], field_encoders: Optional[Dict[(str, Any)]]=None, encoding: Optional[str]=None) -> int:
"\n PostgreSQL only method (raises an exception on any other backend)\n to update a large amount of model instances via COPY FROM.\n The method follows the same interface idea of ``bulk_update`` or ``fast_update``,\n but will perform much better for bigger updates, even than ``fast_update``.\n\n Other than for ``fast_update``, there is no ``batch_size`` argument anymore,\n as the update is always done in one single big batch by copying the data into\n a temporary table and run the update from there.\n\n For the data transport postgres' TEXT format is used. For this the field values\n get encoded by special encoders. The encoders are globally registered for\n django's standard field types (works similar to `get_db_prep_value`).\n With ``field_encoders`` custom encoders can be attached to update fields\n for a single call. This might come handy for additional conversion work or\n further speedup by omitting the base type checks of the default encoders\n (do this only if the data was checked by other means, otherwise malformed\n updates may happen).\n\n ``encoding`` overwrites the text encoding used in the COPY FROM transmission\n (default is psycopg's connection encoding).\n\n Returns the number of affected rows.\n\n NOTE: The underlying implementation is only a PoC and probably will be replaced\n soon by the much safer and superior COPY support of psycopg3.\n "
self._for_write = True
connection = connections[self.db]
if (connection.vendor != 'postgresql'):
raise NotSupportedError(f'copy_update() is not supported on "{connection.vendor}" backend')
from .copy import copy_update
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'copy_update()')
return copy_update(self, objs, fields_, field_encoders, encoding)
|
PostgreSQL only method (raises an exception on any other backend)
to update a large amount of model instances via COPY FROM.
The method follows the same interface idea of ``bulk_update`` or ``fast_update``,
but will perform much better for bigger updates, even than ``fast_update``.
Other than for ``fast_update``, there is no ``batch_size`` argument anymore,
as the update is always done in one single big batch by copying the data into
a temporary table and run the update from there.
For the data transport postgres' TEXT format is used. For this the field values
get encoded by special encoders. The encoders are globally registered for
django's standard field types (works similar to `get_db_prep_value`).
With ``field_encoders`` custom encoders can be attached to update fields
for a single call. This might come in handy for additional conversion work or
further speedup by omitting the base type checks of the default encoders
(do this only if the data was checked by other means, otherwise malformed
updates may happen).
``encoding`` overwrites the text encoding used in the COPY FROM transmission
(default is psycopg's connection encoding).
Returns the number of affected rows.
NOTE: The underlying implementation is only a PoC and probably will be replaced
soon by the much safer and superior COPY support of psycopg3.
|
fast_update/query.py
|
copy_update
|
netzkolchose/django-fast-update
| 0 |
python
|
def copy_update(self, objs: Iterable[Model], fields: Iterable[str], field_encoders: Optional[Dict[(str, Any)]]=None, encoding: Optional[str]=None) -> int:
"\n PostgreSQL only method (raises an exception on any other backend)\n to update a large amount of model instances via COPY FROM.\n The method follows the same interface idea of ``bulk_update`` or ``fast_update``,\n but will perform much better for bigger updates, even than ``fast_update``.\n\n Other than for ``fast_update``, there is no ``batch_size`` argument anymore,\n as the update is always done in one single big batch by copying the data into\n a temporary table and run the update from there.\n\n For the data transport postgres' TEXT format is used. For this the field values\n get encoded by special encoders. The encoders are globally registered for\n django's standard field types (works similar to `get_db_prep_value`).\n With ``field_encoders`` custom encoders can be attached to update fields\n for a single call. This might come handy for additional conversion work or\n further speedup by omitting the base type checks of the default encoders\n (do this only if the data was checked by other means, otherwise malformed\n updates may happen).\n\n ``encoding`` overwrites the text encoding used in the COPY FROM transmission\n (default is psycopg's connection encoding).\n\n Returns the number of affected rows.\n\n NOTE: The underlying implementation is only a PoC and probably will be replaced\n soon by the much safer and superior COPY support of psycopg3.\n "
self._for_write = True
connection = connections[self.db]
if (connection.vendor != 'postgresql'):
raise NotSupportedError(f'copy_update() is not supported on "{connection.vendor}" backend')
from .copy import copy_update
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'copy_update()')
return copy_update(self, objs, fields_, field_encoders, encoding)
|
def copy_update(self, objs: Iterable[Model], fields: Iterable[str], field_encoders: Optional[Dict[(str, Any)]]=None, encoding: Optional[str]=None) -> int:
"\n PostgreSQL only method (raises an exception on any other backend)\n to update a large amount of model instances via COPY FROM.\n The method follows the same interface idea of ``bulk_update`` or ``fast_update``,\n but will perform much better for bigger updates, even than ``fast_update``.\n\n Other than for ``fast_update``, there is no ``batch_size`` argument anymore,\n as the update is always done in one single big batch by copying the data into\n a temporary table and run the update from there.\n\n For the data transport postgres' TEXT format is used. For this the field values\n get encoded by special encoders. The encoders are globally registered for\n django's standard field types (works similar to `get_db_prep_value`).\n With ``field_encoders`` custom encoders can be attached to update fields\n for a single call. This might come handy for additional conversion work or\n further speedup by omitting the base type checks of the default encoders\n (do this only if the data was checked by other means, otherwise malformed\n updates may happen).\n\n ``encoding`` overwrites the text encoding used in the COPY FROM transmission\n (default is psycopg's connection encoding).\n\n Returns the number of affected rows.\n\n NOTE: The underlying implementation is only a PoC and probably will be replaced\n soon by the much safer and superior COPY support of psycopg3.\n "
self._for_write = True
connection = connections[self.db]
if (connection.vendor != 'postgresql'):
raise NotSupportedError(f'copy_update() is not supported on "{connection.vendor}" backend')
from .copy import copy_update
if (not objs):
return 0
objs = tuple(objs)
fields_ = set((fields or []))
sanity_check(self.model, objs, fields_, 'copy_update()')
return copy_update(self, objs, fields_, field_encoders, encoding)<|docstring|>PostgreSQL only method (raises an exception on any other backend)
to update a large amount of model instances via COPY FROM.
The method follows the same interface idea of ``bulk_update`` or ``fast_update``,
but will perform much better for bigger updates, even than ``fast_update``.
Other than for ``fast_update``, there is no ``batch_size`` argument anymore,
as the update is always done in one single big batch by copying the data into
a temporary table and run the update from there.
For the data transport postgres' TEXT format is used. For this the field values
get encoded by special encoders. The encoders are globally registered for
django's standard field types (works similar to `get_db_prep_value`).
With ``field_encoders`` custom encoders can be attached to update fields
for a single call. This might come handy for additional conversion work or
further speedup by omitting the base type checks of the default encoders
(do this only if the data was checked by other means, otherwise malformed
updates may happen).
``encoding`` overwrites the text encoding used in the COPY FROM transmission
(default is psycopg's connection encoding).
Returns the number of affected rows.
NOTE: The underlying implementation is only a PoC and probably will be replaced
soon by the much safer and superior COPY support of psycopg3.<|endoftext|>
|
5c82f23efff883d98307edbb59d111754b8460ebbce3d878ebfd89db682ebbfd
|
@lm.user_loader
def load_user(id):
'加载用户信息回调'
return User.query.get(int(id))
|
Callback to load user information.
|
app/models.py
|
load_user
|
superwongo/flask-mega-tutorial
| 2 |
python
|
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
|
@lm.user_loader
def load_user(id):
return User.query.get(int(id))<|docstring|>加载用户信息回调<|endoftext|>
|
ba768904b97b15868057289283010453195d952cdb226b8899ae7de47acfb62a
|
@classmethod
def search(cls, expression, page, per_page):
'搜索'
(ids, total) = query_index(cls.__tablename__, expression, page, per_page)
if (total == 0):
return (cls.query.filter_by(id=0), 0)
when = []
for i in range(len(ids)):
when.append((ids[i], i))
return (cls.query.filter(cls.id.in_(ids)).order_by(db.case(when, value=cls.id)), total)
|
Search.
|
app/models.py
|
search
|
superwongo/flask-mega-tutorial
| 2 |
python
|
@classmethod
def search(cls, expression, page, per_page):
(ids, total) = query_index(cls.__tablename__, expression, page, per_page)
if (total == 0):
return (cls.query.filter_by(id=0), 0)
when = []
for i in range(len(ids)):
when.append((ids[i], i))
return (cls.query.filter(cls.id.in_(ids)).order_by(db.case(when, value=cls.id)), total)
|
@classmethod
def search(cls, expression, page, per_page):
(ids, total) = query_index(cls.__tablename__, expression, page, per_page)
if (total == 0):
return (cls.query.filter_by(id=0), 0)
when = []
for i in range(len(ids)):
when.append((ids[i], i))
return (cls.query.filter(cls.id.in_(ids)).order_by(db.case(when, value=cls.id)), total)<|docstring|>搜索<|endoftext|>
|
0482e5c4aebcef6a9429f955e7e7fc8603cf8cd1d866e9190d00bb42c7d609a5
|
@classmethod
def before_commit(cls, session):
'会话提交前,记录对象变更'
session._changes = {'add': [obj for obj in session.new if isinstance(obj, cls)], 'update': [obj for obj in session.dirty if isinstance(obj, cls)], 'delete': [obj for obj in session.deleted if isinstance(obj, cls)]}
|
Record object changes before the session commits.
|
app/models.py
|
before_commit
|
superwongo/flask-mega-tutorial
| 2 |
python
|
@classmethod
def before_commit(cls, session):
session._changes = {'add': [obj for obj in session.new if isinstance(obj, cls)], 'update': [obj for obj in session.dirty if isinstance(obj, cls)], 'delete': [obj for obj in session.deleted if isinstance(obj, cls)]}
|
@classmethod
def before_commit(cls, session):
session._changes = {'add': [obj for obj in session.new if isinstance(obj, cls)], 'update': [obj for obj in session.dirty if isinstance(obj, cls)], 'delete': [obj for obj in session.deleted if isinstance(obj, cls)]}<|docstring|>会话提交前,记录对象变更<|endoftext|>
|
aba5fc4abf199074700cdffc3e2c6c256c619f256c520248708aa98cfcebee25
|
@classmethod
def after_commit(cls, session):
'会话提交后,根据记录的变更同步elasticsearch'
for obj in session._changes['add']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['update']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['delete']:
remove_from_index(cls.__tablename__, obj)
session._changes = None
|
After the session commits, sync Elasticsearch based on the recorded changes.
|
app/models.py
|
after_commit
|
superwongo/flask-mega-tutorial
| 2 |
python
|
@classmethod
def after_commit(cls, session):
for obj in session._changes['add']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['update']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['delete']:
remove_from_index(cls.__tablename__, obj)
session._changes = None
|
@classmethod
def after_commit(cls, session):
for obj in session._changes['add']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['update']:
add_to_index(cls.__tablename__, obj)
for obj in session._changes['delete']:
remove_from_index(cls.__tablename__, obj)
session._changes = None<|docstring|>会话提交后,根据记录的变更同步elasticsearch<|endoftext|>
|
d15c482be5f536714e021770207862620975e7eb87fc0f3219275da90f7fa1c6
|
@classmethod
def reindex(cls):
'用于初始化数据库已有数据'
for obj in cls.query:
add_to_index(cls.__tablename__, obj)
|
Used to index the data already present in the database.
|
app/models.py
|
reindex
|
superwongo/flask-mega-tutorial
| 2 |
python
|
@classmethod
def reindex(cls):
for obj in cls.query:
add_to_index(cls.__tablename__, obj)
|
@classmethod
def reindex(cls):
for obj in cls.query:
add_to_index(cls.__tablename__, obj)<|docstring|>用于初始化数据库已有数据<|endoftext|>
|
3603e38b9ed3f27b17c83a6b0bf20cf3dc7314ab5ec7b783e872cc8ef6d3b158
|
def set_password(self, raw_password):
'密码加密'
self.password_hash = generate_password_hash(raw_password)
|
Hash the password.
|
app/models.py
|
set_password
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def set_password(self, raw_password):
self.password_hash = generate_password_hash(raw_password)
|
def set_password(self, raw_password):
self.password_hash = generate_password_hash(raw_password)<|docstring|>密码加密<|endoftext|>
|
2653588aefccd3936d74e717a243a72785ad319d1b424e60b820f8ff32ef8f96
|
def check_password(self, raw_password):
'校验密码是否正确'
return check_password_hash(self.password_hash, raw_password)
|
Check whether the password is correct.
|
app/models.py
|
check_password
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def check_password(self, raw_password):
return check_password_hash(self.password_hash, raw_password)
|
def check_password(self, raw_password):
return check_password_hash(self.password_hash, raw_password)<|docstring|>校验密码是否正确<|endoftext|>
|
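A runnable sketch of the Werkzeug helpers used by `set_password` / `check_password` above (requires Werkzeug, which Flask installs):

```python
from werkzeug.security import generate_password_hash, check_password_hash

pw_hash = generate_password_hash('correct horse battery staple')     # salted, one-way
print(check_password_hash(pw_hash, 'correct horse battery staple'))  # True
print(check_password_hash(pw_hash, 'wrong guess'))                   # False
```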
f47cb7d779908b4a1e71be83e60ec0802661f60f0b997c4cb9421de8b1a4cbac
|
def is_following(self, user):
'是否存在关注关系'
return (self.followed.filter((followers.c.followed_id == user.id)).count() > 0)
|
Whether a follow relationship exists.
|
app/models.py
|
is_following
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def is_following(self, user):
return (self.followed.filter((followers.c.followed_id == user.id)).count() > 0)
|
def is_following(self, user):
return (self.followed.filter((followers.c.followed_id == user.id)).count() > 0)<|docstring|>是否存在关注关系<|endoftext|>
|
3df2bc0cb85855015707d883bc30f166906185ead2e0df055d67f95b794a9f04
|
def follow(self, user):
'添加关注'
if (not self.is_following(user)):
self.followed.append(user)
|
Follow a user.
|
app/models.py
|
follow
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def follow(self, user):
if (not self.is_following(user)):
self.followed.append(user)
|
def follow(self, user):
if (not self.is_following(user)):
self.followed.append(user)<|docstring|>添加关注<|endoftext|>
|
85899e4e9f6b0de787b871fde3fde7b7f1d9720f2270d776d120a479619594f1
|
def unfollow(self, user):
'取消关注'
if self.is_following(user):
self.followed.remove(user)
|
Unfollow a user.
|
app/models.py
|
unfollow
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
|
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)<|docstring|>取消关注<|endoftext|>
|
edf5a03c2d472774f6b9dd8d071146ec27e90a697b746b63fde3384ff1983466
|
def followed_posts(self):
'已关注用户帖子查询'
followed = Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter((followers.c.follower_id == self.id))
own = Post.query.filter_by(user_id=self.id)
return followed.union(own).order_by(Post.timestamp.desc())
|
已关注用户帖子查询
|
app/models.py
|
followed_posts
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def followed_posts(self):
followed = Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter((followers.c.follower_id == self.id))
own = Post.query.filter_by(user_id=self.id)
return followed.union(own).order_by(Post.timestamp.desc())
|
def followed_posts(self):
followed = Post.query.join(followers, (followers.c.followed_id == Post.user_id)).filter((followers.c.follower_id == self.id))
own = Post.query.filter_by(user_id=self.id)
return followed.union(own).order_by(Post.timestamp.desc())<|docstring|>已关注用户帖子查询<|endoftext|>
|
b519efc0d4197906b03a2321159eb8562fd2b1f259b66b1ac7dc52ea772e9b17
|
def get_jwt_token(self, expires_in=600):
'获取JWT令牌'
return jwt.encode({'reset_password': self.id, 'exp': (time() + expires_in)}, current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf8')
|
获取JWT令牌
|
app/models.py
|
get_jwt_token
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def get_jwt_token(self, expires_in=600):
return jwt.encode({'reset_password': self.id, 'exp': (time() + expires_in)}, current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf8')
|
def get_jwt_token(self, expires_in=600):
return jwt.encode({'reset_password': self.id, 'exp': (time() + expires_in)}, current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf8')<|docstring|>获取JWT令牌<|endoftext|>
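A self-contained sketch of issuing and verifying a reset token in the style of get_jwt_token(); SECRET_KEY stands in for current_app.config['SECRET_KEY'], and the .decode('utf8') in the record reflects PyJWT 1.x, where encode() returned bytes (2.x returns str).
import jwt
from time import time

SECRET_KEY = 'example-secret'  # assumption: replaces current_app.config['SECRET_KEY']

def issue_reset_token(user_id, expires_in=600):
    return jwt.encode({'reset_password': user_id, 'exp': time() + expires_in},
                      SECRET_KEY, algorithm='HS256')

def verify_reset_token(token):
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
    except jwt.PyJWTError:
        return None  # bad signature or expired token
    return payload.get('reset_password')

assert verify_reset_token(issue_reset_token(42)) == 42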
|
89366896dbddfe9aeffc7ddc2fb1f2d364ed2683179b228387e0a9f14459f5bb
|
def new_messages(self):
'查询用户未读信息条数'
last_read_time = (self.last_message_read_time or datetime(1900, 1, 1))
return Message.query.filter_by(recipient=self).filter((Message.timestamp > last_read_time)).count()
|
查询用户未读信息条数
|
app/models.py
|
new_messages
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def new_messages(self):
last_read_time = (self.last_message_read_time or datetime(1900, 1, 1))
return Message.query.filter_by(recipient=self).filter((Message.timestamp > last_read_time)).count()
|
def new_messages(self):
last_read_time = (self.last_message_read_time or datetime(1900, 1, 1))
return Message.query.filter_by(recipient=self).filter((Message.timestamp > last_read_time)).count()<|docstring|>查询用户未读信息条数<|endoftext|>
|
01714259c480b2c2e475afd126442258cdb3a13af02164e2ec4573e1567d2cd0
|
def add_notification(self, name, data):
'新增通知'
self.notifications.filter_by(name=name).delete()
n = Notification(name=name, payload_json=json.dumps(data), user=self)
db.session.add(n)
return n
|
新增通知
|
app/models.py
|
add_notification
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def add_notification(self, name, data):
self.notifications.filter_by(name=name).delete()
n = Notification(name=name, payload_json=json.dumps(data), user=self)
db.session.add(n)
return n
|
def add_notification(self, name, data):
self.notifications.filter_by(name=name).delete()
n = Notification(name=name, payload_json=json.dumps(data), user=self)
db.session.add(n)
return n<|docstring|>新增通知<|endoftext|>
|
d87f9ec1991968e62814b8c910976a7be079e8e84d854efe0ec76e8f4ece0103
|
def launch_task(self, name, description, *args, **kwargs):
'登记任务'
rq_job = current_app.task_queue.enqueue('app.task.{}'.format(name), self.id, *args, **kwargs)
task = Task(id=rq_job.get_id(), name=name, description=description, user=self)
db.session.add(task)
return task
|
登记任务
|
app/models.py
|
launch_task
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def launch_task(self, name, description, *args, **kwargs):
rq_job = current_app.task_queue.enqueue('app.task.{}'.format(name), self.id, *args, **kwargs)
task = Task(id=rq_job.get_id(), name=name, description=description, user=self)
db.session.add(task)
return task
|
def launch_task(self, name, description, *args, **kwargs):
rq_job = current_app.task_queue.enqueue('app.task.{}'.format(name), self.id, *args, **kwargs)
task = Task(id=rq_job.get_id(), name=name, description=description, user=self)
db.session.add(task)
return task<|docstring|>登记任务<|endoftext|>
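launch_task() assumes an RQ queue hung off the Flask app as current_app.task_queue; a hedged setup sketch follows (the queue name, Redis URL and the app.task.example_task path are assumptions for illustration).
from redis import Redis
from rq import Queue

redis_conn = Redis.from_url('redis://localhost:6379/0')
task_queue = Queue('microblog-tasks', connection=redis_conn)

# enqueue by dotted path, as launch_task() does, passing the user id first
job = task_queue.enqueue('app.task.example_task', 42)
print(job.get_id())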
|
8932ffe70cf6dc76a06f0ccd381360c3d61e80a3356966c62bb6066a32d25fb7
|
def get_tasks_in_progress(self):
'查询本人所有任务进度'
return Task.query.filter_by(user=self, complete=False).all()
|
查询本人所有任务进度
|
app/models.py
|
get_tasks_in_progress
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def get_tasks_in_progress(self):
return Task.query.filter_by(user=self, complete=False).all()
|
def get_tasks_in_progress(self):
return Task.query.filter_by(user=self, complete=False).all()<|docstring|>查询本人所有任务进度<|endoftext|>
|
88cea91b77af80fcce97b0bd991519dcb49e759a17a29c846fdf76dfc181cfcf
|
def get_task_in_progress(self, name):
'查询本人某一任务进度'
return Task.query.filter_by(name=name, user=self, complete=False).first()
|
查询本人某一任务进度
|
app/models.py
|
get_task_in_progress
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def get_task_in_progress(self, name):
return Task.query.filter_by(name=name, user=self, complete=False).first()
|
def get_task_in_progress(self, name):
return Task.query.filter_by(name=name, user=self, complete=False).first()<|docstring|>查询本人某一任务进度<|endoftext|>
|
bdefab3c8cf206a24513bb4db214ef05b27b8198b4f2e92f69ba13f329ecaeed
|
def __repr__(self):
'打印类对象时的展示方式'
return ('<User %r>' % self.username)
|
打印类对象时的展示方式
|
app/models.py
|
__repr__
|
superwongo/flask-mega-tutorial
| 2 |
python
|
def __repr__(self):
return ('<User %r>' % self.username)
|
def __repr__(self):
return ('<User %r>' % self.username)<|docstring|>打印类对象时的展示方式<|endoftext|>
|
c7f27d24ea4959e4c997eaff22d7d1bf4a38bb10ba31458b90069eef5ea6eed0
|
def __eq__(self, *args):
' x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y '
pass
|
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ParametersOrder.py
|
__eq__
|
daddycocoaman/ironpython-stubs
| 182 |
python
|
def __eq__(self, *args):
' '
pass
|
def __eq__(self, *args):
' '
pass<|docstring|>x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y<|endoftext|>
|
ada11d26366342d19ddf94f50604d65258a36c58cfc3ac043990b0b26b3c935d
|
def __format__(self, *args):
' __format__(formattable: IFormattable,format: str) -> str '
pass
|
__format__(formattable: IFormattable,format: str) -> str
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ParametersOrder.py
|
__format__
|
daddycocoaman/ironpython-stubs
| 182 |
python
|
def __format__(self, *args):
' '
pass
|
def __format__(self, *args):
' '
pass<|docstring|>__format__(formattable: IFormattable,format: str) -> str<|endoftext|>
|
32b5271afcd5ecc37febb67dd854fa2d1b2c4c68b2c41d2ec119d33157e9bbaa
|
def __init__(self, *args):
' x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature '
pass
|
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ParametersOrder.py
|
__init__
|
daddycocoaman/ironpython-stubs
| 182 |
python
|
def __init__(self, *args):
' '
pass
|
def __init__(self, *args):
' '
pass<|docstring|>x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature<|endoftext|>
|
5c56d348d781f95a78a1ad1b4ec61110fdf68744a7b1f7417362c61ae8e75f15
|
def compile(regex_str: AnyStr, flags: _FlagsType=...) -> Pattern[AnyStr]:
'\n Compile regular expression, return ``regex <regex>`` object.\n '
...
|
Compile regular expression, return ``regex <regex>`` object.
|
dist/micropy-cli/frozen/ure.py
|
compile
|
kevindawson/Pico-Stub
| 19 |
python
|
def compile(regex_str: AnyStr, flags: _FlagsType=...) -> Pattern[AnyStr]:
'\n \n '
...
|
def compile(regex_str: AnyStr, flags: _FlagsType=...) -> Pattern[AnyStr]:
'\n \n '
...<|docstring|>Compile regular expression, return ``regex <regex>`` object.<|endoftext|>
|
86dc3d740dba07a25f68db1b2e200ec32e9dbc6401f7fa79a8ab8b14af4e2dc2
|
def search(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and search it in a *string*. Unlike `match`, this will search\n string for first position which matches regex (which still may be\n 0 if regex is anchored).\n '
...
|
Compile *regex_str* and search it in a *string*. Unlike `match`, this will search
string for first position which matches regex (which still may be
0 if regex is anchored).
|
dist/micropy-cli/frozen/ure.py
|
search
|
kevindawson/Pico-Stub
| 19 |
python
|
def search(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and search it in a *string*. Unlike `match`, this will search\n string for first position which matches regex (which still may be\n 0 if regex is anchored).\n '
...
|
def search(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and search it in a *string*. Unlike `match`, this will search\n string for first position which matches regex (which still may be\n 0 if regex is anchored).\n '
...<|docstring|>Compile *regex_str* and search it in a *string*. Unlike `match`, this will search
string for first position which matches regex (which still may be
0 if regex is anchored).<|endoftext|>
|
294fc870eb8bb6379b5ec2fd3115b353418b400ef25524f582f76ac2ac156fdb
|
def match(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and match against *string*. Match always happens\n from starting position in a string.\n '
...
|
Compile *regex_str* and match against *string*. Match always happens
from starting position in a string.
|
dist/micropy-cli/frozen/ure.py
|
match
|
kevindawson/Pico-Stub
| 19 |
python
|
def match(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and match against *string*. Match always happens\n from starting position in a string.\n '
...
|
def match(regex_str: AnyStr, string: AnyStr, flags: _FlagsType=...) -> Optional[Match[AnyStr]]:
'\n Compile *regex_str* and match against *string*. Match always happens\n from starting position in a string.\n '
...<|docstring|>Compile *regex_str* and match against *string*. Match always happens
from starting position in a string.<|endoftext|>
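The anchoring difference between match() and search() described above, demonstrated with CPython's re module (ure implements a compatible subset).
import re

print(re.match(r'\d+', 'abc123'))   # None: match() is anchored at position 0
print(re.search(r'\d+', 'abc123'))  # <re.Match object; span=(3, 6), match='123'>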
|
6faf2e08129ccb1057ead69767ede518b37af62e2d63b5ac24f40a8786604829
|
def sub(regex_str: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr:
'\n Compile *regex_str* and search for it in *string*, replacing all matches\n with *replace*, and returning the new string.\n\n *replace* can be a string or a function. If it is a string then escape\n sequences of the form ``\\<number>`` and ``\\g<number>`` can be used to\n expand to the corresponding group (or an empty string for unmatched groups).\n If *replace* is a function then it must take a single argument (the match)\n and should return a replacement string.\n\n If *count* is specified and non-zero then substitution will stop after\n this many substitutions are made. The *flags* argument is ignored.\n '
...
|
Compile *regex_str* and search for it in *string*, replacing all matches
with *replace*, and returning the new string.
*replace* can be a string or a function. If it is a string then escape
sequences of the form ``\<number>`` and ``\g<number>`` can be used to
expand to the corresponding group (or an empty string for unmatched groups).
If *replace* is a function then it must take a single argument (the match)
and should return a replacement string.
If *count* is specified and non-zero then substitution will stop after
this many substitutions are made. The *flags* argument is ignored.
|
dist/micropy-cli/frozen/ure.py
|
sub
|
kevindawson/Pico-Stub
| 19 |
python
|
def sub(regex_str: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr:
'\n Compile *regex_str* and search for it in *string*, replacing all matches\n with *replace*, and returning the new string.\n\n *replace* can be a string or a function. If it is a string then escape\n sequences of the form ``\\<number>`` and ``\\g<number>`` can be used to\n expand to the corresponding group (or an empty string for unmatched groups).\n If *replace* is a function then it must take a single argument (the match)\n and should return a replacement string.\n\n If *count* is specified and non-zero then substitution will stop after\n this many substitutions are made. The *flags* argument is ignored.\n '
...
|
def sub(regex_str: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr:
'\n Compile *regex_str* and search for it in *string*, replacing all matches\n with *replace*, and returning the new string.\n\n *replace* can be a string or a function. If it is a string then escape\n sequences of the form ``\\<number>`` and ``\\g<number>`` can be used to\n expand to the corresponding group (or an empty string for unmatched groups).\n If *replace* is a function then it must take a single argument (the match)\n and should return a replacement string.\n\n If *count* is specified and non-zero then substitution will stop after\n this many substitutions are made. The *flags* argument is ignored.\n '
...<|docstring|>Compile *regex_str* and search for it in *string*, replacing all matches
with *replace*, and returning the new string.
*replace* can be a string or a function. If it is a string then escape
sequences of the form ``\<number>`` and ``\g<number>`` can be used to
expand to the corresponding group (or an empty string for unmatched groups).
If *replace* is a function then it must take a single argument (the match)
and should return a replacement string.
If *count* is specified and non-zero then substitution will stop after
this many substitutions are made. The *flags* argument is ignored.<|endoftext|>
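Both replacement forms that the sub() docstring describes, again shown with CPython's re for portability.
import re

# string replacement with a \1 back-reference to the first captured group
print(re.sub(r'(\w+)@example\.com', r'\1@example.org', 'alice@example.com'))
# -> alice@example.org

# callable replacement: receives the match object and returns the new text
print(re.sub(r'\d+', lambda m: str(int(m.group(0)) * 2), 'x=21'))
# -> x=42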
|
71b88b033fbf1e24d70e1609fe4beefcc5a9bb32c69ca118d3312f955bffb519
|
def run(self) -> List[Node]:
'\n Return list of `Node`.\n '
list_node = nodes.bullet_list()
number_of_revisions = self.options.get(OPTION_NUMBER_OF_REVISIONS, 10)
repo = self.get_repo(number_of_revisions)
if (repo is None):
return []
revision = self.options.get(OPTION_REVISION)
for commit in repo.get_commits(revision=revision):
item = self.get_changelog(repo, commit)
list_node.append(item)
return [list_node]
|
Return list of `Node`.
|
sphinxcontrib/vcs.py
|
run
|
t2y/sphinxcontrib-vcs
| 3 |
python
|
def run(self) -> List[Node]:
'\n \n '
list_node = nodes.bullet_list()
number_of_revisions = self.options.get(OPTION_NUMBER_OF_REVISIONS, 10)
repo = self.get_repo(number_of_revisions)
if (repo is None):
return []
revision = self.options.get(OPTION_REVISION)
for commit in repo.get_commits(revision=revision):
item = self.get_changelog(repo, commit)
list_node.append(item)
return [list_node]
|
def run(self) -> List[Node]:
'\n \n '
list_node = nodes.bullet_list()
number_of_revisions = self.options.get(OPTION_NUMBER_OF_REVISIONS, 10)
repo = self.get_repo(number_of_revisions)
if (repo is None):
return []
revision = self.options.get(OPTION_REVISION)
for commit in repo.get_commits(revision=revision):
item = self.get_changelog(repo, commit)
list_node.append(item)
return [list_node]<|docstring|>Return list of `Node`.<|endoftext|>
|
5d17764b838a225b9d7c85022adf3a4c62a3e625c23389898b0f00636e644497
|
def get_repo(self, number_of_revisions: int) -> GitRepository:
'\n Return Git repository.\n '
env = self.state.document.settings.env
return GitRepository(number_of_revisions, env.srcdir, search_parent_directories=True)
|
Return Git repository.
|
sphinxcontrib/vcs.py
|
get_repo
|
t2y/sphinxcontrib-vcs
| 3 |
python
|
def get_repo(self, number_of_revisions: int) -> GitRepository:
'\n \n '
env = self.state.document.settings.env
return GitRepository(number_of_revisions, env.srcdir, search_parent_directories=True)
|
def get_repo(self, number_of_revisions: int) -> GitRepository:
'\n \n '
env = self.state.document.settings.env
return GitRepository(number_of_revisions, env.srcdir, search_parent_directories=True)<|docstring|>Return Git repository.<|endoftext|>
|
d20751691d3217f6320b58e1ce53f2b9c1140eee0f7890c40ddc93391c8a8bdb
|
def get_changelog(self, repo: GitRepository, commit: Commit) -> Node:
'\n Return changelog.\n '
item = nodes.list_item()
para = nodes.paragraph()
para.append(self._make_message_node(commit.message, commit.hexsha))
para.append(nodes.inline(text=' by '))
para.append(nodes.emphasis(text=commit.author.name))
para.append(nodes.inline(text=' at '))
commit_date = datetime.fromtimestamp(commit.authored_date)
para.append(nodes.emphasis(text=str(commit_date)))
item.append(para)
if (OPTION_WITH_REF_URL in self.options):
ref_url = repo.get_commit_url(commit.hexsha)
ref = nodes.reference('', commit.hexsha, refuri=ref_url)
item.append(nodes.paragraph('', '', ref))
if (OPTION_INCLUDE_DIFF in self.options):
diff = repo.get_diff(commit.hexsha)
item.append(self._make_diff_node(diff, commit.hexsha))
return item
|
Return changelog.
|
sphinxcontrib/vcs.py
|
get_changelog
|
t2y/sphinxcontrib-vcs
| 3 |
python
|
def get_changelog(self, repo: GitRepository, commit: Commit) -> Node:
'\n \n '
item = nodes.list_item()
para = nodes.paragraph()
para.append(self._make_message_node(commit.message, commit.hexsha))
para.append(nodes.inline(text=' by '))
para.append(nodes.emphasis(text=commit.author.name))
para.append(nodes.inline(text=' at '))
commit_date = datetime.fromtimestamp(commit.authored_date)
para.append(nodes.emphasis(text=str(commit_date)))
item.append(para)
if (OPTION_WITH_REF_URL in self.options):
ref_url = repo.get_commit_url(commit.hexsha)
ref = nodes.reference('', commit.hexsha, refuri=ref_url)
item.append(nodes.paragraph('', '', ref))
if (OPTION_INCLUDE_DIFF in self.options):
diff = repo.get_diff(commit.hexsha)
item.append(self._make_diff_node(diff, commit.hexsha))
return item
|
def get_changelog(self, repo: GitRepository, commit: Commit) -> Node:
'\n \n '
item = nodes.list_item()
para = nodes.paragraph()
para.append(self._make_message_node(commit.message, commit.hexsha))
para.append(nodes.inline(text=' by '))
para.append(nodes.emphasis(text=commit.author.name))
para.append(nodes.inline(text=' at '))
commit_date = datetime.fromtimestamp(commit.authored_date)
para.append(nodes.emphasis(text=str(commit_date)))
item.append(para)
if (OPTION_WITH_REF_URL in self.options):
ref_url = repo.get_commit_url(commit.hexsha)
ref = nodes.reference('', commit.hexsha, refuri=ref_url)
item.append(nodes.paragraph('', '', ref))
if (OPTION_INCLUDE_DIFF in self.options):
diff = repo.get_diff(commit.hexsha)
item.append(self._make_diff_node(diff, commit.hexsha))
return item<|docstring|>Return changelog.<|endoftext|>
|
961c543fdd68c0be184674de3522f7d848099d128cd1398f717ef1bb4647dd93
|
@decorators.list_route(methods=['post'])
def mark_all_as_read(self, request):
'\n Marks all unread notifications as read.\n ---\n\n omit_serializer: true\n\n type:\n updated_items:\n type: int\n required: true\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notifications = self.get_queryset()
count = notifications.filter(is_read=False).update(is_read=True, last_modified=timezone.now())
return Response({'updated_items': count})
|
Marks all unread notifications as read.
---
omit_serializer: true
type:
updated_items:
type: int
required: true
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
|
src/tandlr/notifications/viewsets.py
|
mark_all_as_read
|
shrmoud/schoolapp
| 0 |
python
|
@decorators.list_route(methods=['post'])
def mark_all_as_read(self, request):
'\n Marks all unread notifications as read.\n ---\n\n omit_serializer: true\n\n type:\n updated_items:\n type: int\n required: true\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notifications = self.get_queryset()
count = notifications.filter(is_read=False).update(is_read=True, last_modified=timezone.now())
return Response({'updated_items': count})
|
@decorators.list_route(methods=['post'])
def mark_all_as_read(self, request):
'\n Marks all unread notifications as read.\n ---\n\n omit_serializer: true\n\n type:\n updated_items:\n type: int\n required: true\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notifications = self.get_queryset()
count = notifications.filter(is_read=False).update(is_read=True, last_modified=timezone.now())
return Response({'updated_items': count})<|docstring|>Marks all unread notifications as read.
---
omit_serializer: true
type:
updated_items:
type: int
required: true
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json<|endoftext|>
|
02a00d09b56e6f193e34fa9d3f49c700ac375e64c6ac8f48b685c8c4fb920df8
|
@decorators.detail_route(methods=['post'])
def mark_as_read(self, request, pk=None):
'\n Marks the given notification as read.\n ---\n\n response_serializer: serializers.NotificationSerializer\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notification = self.get_object()
notification.is_read = True
notification.save()
serializer = self.get_serializer(notification)
return Response(serializer.data)
|
Marks the given notification as read.
---
response_serializer: serializers.NotificationSerializer
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
|
src/tandlr/notifications/viewsets.py
|
mark_as_read
|
shrmoud/schoolapp
| 0 |
python
|
@decorators.detail_route(methods=['post'])
def mark_as_read(self, request, pk=None):
'\n Marks the given notification as read.\n ---\n\n response_serializer: serializers.NotificationSerializer\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notification = self.get_object()
notification.is_read = True
notification.save()
serializer = self.get_serializer(notification)
return Response(serializer.data)
|
@decorators.detail_route(methods=['post'])
def mark_as_read(self, request, pk=None):
'\n Marks the given notification as read.\n ---\n\n response_serializer: serializers.NotificationSerializer\n\n responseMessages:\n - code: 200\n message: OK\n - code: 400\n message: BAD REQUEST\n - code: 500\n message: INTERNAL SERVER ERROR\n\n consumes:\n - application/json\n produces:\n - application/json\n '
notification = self.get_object()
notification.is_read = True
notification.save()
serializer = self.get_serializer(notification)
return Response(serializer.data)<|docstring|>Marks the given notification as read.
---
response_serializer: serializers.NotificationSerializer
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json<|endoftext|>
|
3023e3acc4d0fdffa6c299f82f4bc20041b968881f2a434b56a0a886510150db
|
def get_queryset(self):
'\n Returns the queryset that will be used to display notifications.\n\n Only notifications for the current user are included.\n '
return Notification.objects.filter(receiver=self.request.user).order_by('-created_date')
|
Returns the queryset that will be used to display notifications.
Only notifications for the current user are included.
|
src/tandlr/notifications/viewsets.py
|
get_queryset
|
shrmoud/schoolapp
| 0 |
python
|
def get_queryset(self):
'\n Returns the queryset that will be used to display notifications.\n\n Only notifications for the current user are included.\n '
return Notification.objects.filter(receiver=self.request.user).order_by('-created_date')
|
def get_queryset(self):
'\n Returns the queryset that will be used to display notifications.\n\n Only notifications for the current user are included.\n '
return Notification.objects.filter(receiver=self.request.user).order_by('-created_date')<|docstring|>Returns the queryset that will be used to display notifications.
Only notifications for the current user are included.<|endoftext|>
|
8266d355701917a767024bf6b850c85630e04e5deab11ecb895ff8126216606a
|
def extract_weather(is_local=False):
'크롤링할 예보를 선택할 시 그에 해당하는 함수를 리턴합니다.\n\n Parameter\n ---------\n\n `is_local` (bool)\n - True: 동네예보\n - False: 중기예보\n\n Returns\n -------\n (url, SEQ_NUM=17) -> list: 동네예보 함수\n (url) -> list: 중기예보 함수\n '
if is_local:
return __extract_weather_local
else:
return __extract_weather_mid_term
|
크롤링할 예보를 선택할 시 그에 해당하는 함수를 리턴합니다.
Parameter
---------
`is_local` (bool)
- True: 동네예보
- False: 중기예보
Returns
-------
(url, SEQ_NUM=17) -> list: 동네예보 함수
(url) -> list: 중기예보 함수
|
scraping/weather.py
|
extract_weather
|
WhiteHyun/Lab
| 0 |
python
|
def extract_weather(is_local=False):
'크롤링할 예보를 선택할 시 그에 해당하는 함수를 리턴합니다.\n\n Parameter\n ---------\n\n `is_local` (bool)\n - True: 동네예보\n - False: 중기예보\n\n Returns\n -------\n (url, SEQ_NUM=17) -> list: 동네예보 함수\n (url) -> list: 중기예보 함수\n '
if is_local:
return __extract_weather_local
else:
return __extract_weather_mid_term
|
def extract_weather(is_local=False):
'크롤링할 예보를 선택할 시 그에 해당하는 함수를 리턴합니다.\n\n Parameter\n ---------\n\n `is_local` (bool)\n - True: 동네예보\n - False: 중기예보\n\n Returns\n -------\n (url, SEQ_NUM=17) -> list: 동네예보 함수\n (url) -> list: 중기예보 함수\n '
if is_local:
return __extract_weather_local
else:
return __extract_weather_mid_term<|docstring|>크롤링할 예보를 선택할 시 그에 해당하는 함수를 리턴합니다.
Parameter
---------
`is_local` (bool)
- True: 동네예보
- False: 중기예보
Returns
-------
(url, SEQ_NUM=17) -> list: 동네예보 함수
(url) -> list: 중기예보 함수<|endoftext|>
|
8427915e9c4db2a0d5432c3310b648b9d30b2c4fbc118987a44b1b06068a1773
|
def __extract_weather_local(url, SEQ_NUM=17) -> list:
'\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 동네예보를 크롤링하여 값을 가져오는 함수입니다.\n 중기예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<day>`: 날짜\n - 오늘: 0\n - 내일: 1\n - 모레: 2\n\n `<temp>`: 온도\n\n `<tmx>`: 최고 기온\n\n `<tmn>`: 최저 기온\n\n `<sky>`: 하늘 상태\n\n `<pty>`: 강수 형태\n\n `<pop>`: 강수 확률\n\n `<ws>`: 풍속\n\n `<wd>`: 풍향\n\n `<reh>`: 습도\n\n `<r12>`: 12시간 강수량\n\n `<s12>`: 12시간 신적설\n\n `<r06>`: 6시간 강수량\n\n `<s06>`: 6시간 신적설\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
data_list = []
for i in range(SEQ_NUM):
temp = weather_soup.find('data', {'seq': i})
if (temp is not None):
data_list.append(temp)
data_length = len(data_list)
weather_data = [[] for i in range(len(data_list))]
for n in range(data_length):
weather_data[n].append(data_list[n].find('day').string)
weather_data[n].append(data_list[n].find('hour').string)
weather_data[n].append(data_list[n].find('temp').string)
weather_data[n].append(data_list[n].find('reh').string)
return weather_data
|
Explanation
----
분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3
참고한 사이트: https://kocoafab.cc/tutorial/view/595
동네예보를 크롤링하여 값을 가져오는 함수입니다.
중기예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.
Tags
-----
`<day>`: 날짜
- 오늘: 0
- 내일: 1
- 모레: 2
`<temp>`: 온도
`<tmx>`: 최고 기온
`<tmn>`: 최저 기온
`<sky>`: 하늘 상태
`<pty>`: 강수 형태
`<pop>`: 강수 확률
`<ws>`: 풍속
`<wd>`: 풍향
`<reh>`: 습도
`<r12>`: 12시간 강수량
`<s12>`: 12시간 신적설
`<r06>`: 6시간 강수량
`<s06>`: 6시간 신적설
Return
-------
weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.
|
scraping/weather.py
|
__extract_weather_local
|
WhiteHyun/Lab
| 0 |
python
|
def __extract_weather_local(url, SEQ_NUM=17) -> list:
'\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 동네예보를 크롤링하여 값을 가져오는 함수입니다.\n 중기예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<day>`: 날짜\n - 오늘: 0\n - 내일: 1\n - 모레: 2\n\n `<temp>`: 온도\n\n `<tmx>`: 최고 기온\n\n `<tmn>`: 최저 기온\n\n `<sky>`: 하늘 상태\n\n `<pty>`: 강수 형태\n\n `<pop>`: 강수 확률\n\n `<ws>`: 풍속\n\n `<wd>`: 풍향\n\n `<reh>`: 습도\n\n `<r12>`: 12시간 강수량\n\n `<s12>`: 12시간 신적설\n\n `<r06>`: 6시간 강수량\n\n `<s06>`: 6시간 신적설\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
data_list = []
for i in range(SEQ_NUM):
temp = weather_soup.find('data', {'seq': i})
if (temp is not None):
data_list.append(temp)
data_length = len(data_list)
weather_data = [[] for i in range(len(data_list))]
for n in range(data_length):
weather_data[n].append(data_list[n].find('day').string)
weather_data[n].append(data_list[n].find('hour').string)
weather_data[n].append(data_list[n].find('temp').string)
weather_data[n].append(data_list[n].find('reh').string)
return weather_data
|
def __extract_weather_local(url, SEQ_NUM=17) -> list:
'\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 동네예보를 크롤링하여 값을 가져오는 함수입니다.\n 중기예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<day>`: 날짜\n - 오늘: 0\n - 내일: 1\n - 모레: 2\n\n `<temp>`: 온도\n\n `<tmx>`: 최고 기온\n\n `<tmn>`: 최저 기온\n\n `<sky>`: 하늘 상태\n\n `<pty>`: 강수 형태\n\n `<pop>`: 강수 확률\n\n `<ws>`: 풍속\n\n `<wd>`: 풍향\n\n `<reh>`: 습도\n\n `<r12>`: 12시간 강수량\n\n `<s12>`: 12시간 신적설\n\n `<r06>`: 6시간 강수량\n\n `<s06>`: 6시간 신적설\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
data_list = []
for i in range(SEQ_NUM):
temp = weather_soup.find('data', {'seq': i})
if (temp is not None):
data_list.append(temp)
data_length = len(data_list)
weather_data = [[] for i in range(len(data_list))]
for n in range(data_length):
weather_data[n].append(data_list[n].find('day').string)
weather_data[n].append(data_list[n].find('hour').string)
weather_data[n].append(data_list[n].find('temp').string)
weather_data[n].append(data_list[n].find('reh').string)
return weather_data<|docstring|>Explanation
----
분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3
참고한 사이트: https://kocoafab.cc/tutorial/view/595
동네예보를 크롤링하여 값을 가져오는 함수입니다.
중기예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.
Tags
-----
`<day>`: 날짜
- 오늘: 0
- 내일: 1
- 모레: 2
`<temp>`: 온도
`<tmx>`: 최고 기온
`<tmn>`: 최저 기온
`<sky>`: 하늘 상태
`<pty>`: 강수 형태
`<pop>`: 강수 확률
`<ws>`: 풍속
`<wd>`: 풍향
`<reh>`: 습도
`<r12>`: 12시간 강수량
`<s12>`: 12시간 신적설
`<r06>`: 6시간 강수량
`<s06>`: 6시간 신적설
Return
-------
weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.<|endoftext|>
|
411e637dd3d39174b0d87c4629064b0eebf2291b764b6118f38fcb8d40eb51e1
|
def __extract_weather_mid_term(url) -> list:
'\n\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 중기예보를 크롤링하여 값을 가져오는 함수입니다.\n 동네예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<mode>`: 예보날짜\n - A01: 전일 예보\n - A02: 오전, 오후 구분 예보\n\n `<tmEf>`: 시간(yyyy-mm-dd 00:00)\n\n `<wf>`: 날씨예보\n\n `<tmn>`: 최저온도\n\n `<tmx>`: 최고온도\n\n `<rnSt>`: 강수확률\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
weather_data = []
for location_data in weather_soup.findAll('location', {'wl_ver': 3}):
city = location_data.find('city')
for data in location_data.findAll('data'):
dictionary = {}
dictionary['city'] = city.string
dictionary['mode'] = data.find('mode').string
dictionary['tmef'] = data.find('tmef').string
dictionary['wf'] = data.find('wf').string
dictionary['tmn'] = data.find('tmn').string
dictionary['tmx'] = data.find('tmx').string
dictionary['rnst'] = data.find('rnst').string
weather_data.append(dictionary)
return weather_data
|
Explanation
----
분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3
참고한 사이트: https://kocoafab.cc/tutorial/view/595
중기예보를 크롤링하여 값을 가져오는 함수입니다.
동네예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.
Tags
-----
`<mode>`: 예보날짜
- A01: 전일 예보
- A02: 오전, 오후 구분 예보
`<tmEf>`: 시간(yyyy-mm-dd 00:00)
`<wf>`: 날씨예보
`<tmn>`: 최저온도
`<tmx>`: 최고온도
`<rnSt>`: 강수확률
Return
-------
weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.
|
scraping/weather.py
|
__extract_weather_mid_term
|
WhiteHyun/Lab
| 0 |
python
|
def __extract_weather_mid_term(url) -> list:
'\n\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 중기예보를 크롤링하여 값을 가져오는 함수입니다.\n 동네예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<mode>`: 예보날짜\n - A01: 전일 예보\n - A02: 오전, 오후 구분 예보\n\n `<tmEf>`: 시간(yyyy-mm-dd 00:00)\n\n `<wf>`: 날씨예보\n\n `<tmn>`: 최저온도\n\n `<tmx>`: 최고온도\n\n `<rnSt>`: 강수확률\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
weather_data = []
for location_data in weather_soup.findAll('location', {'wl_ver': 3}):
city = location_data.find('city')
for data in location_data.findAll('data'):
dictionary = {}
dictionary['city'] = city.string
dictionary['mode'] = data.find('mode').string
dictionary['tmef'] = data.find('tmef').string
dictionary['wf'] = data.find('wf').string
dictionary['tmn'] = data.find('tmn').string
dictionary['tmx'] = data.find('tmx').string
dictionary['rnst'] = data.find('rnst').string
weather_data.append(dictionary)
return weather_data
|
def __extract_weather_mid_term(url) -> list:
'\n\n Explanation\n ----\n 분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3\n\n 참고한 사이트: https://kocoafab.cc/tutorial/view/595\n\n 중기예보를 크롤링하여 값을 가져오는 함수입니다.\n 동네예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.\n\n Tags\n -----\n\n `<mode>`: 예보날짜\n - A01: 전일 예보\n - A02: 오전, 오후 구분 예보\n\n `<tmEf>`: 시간(yyyy-mm-dd 00:00)\n\n `<wf>`: 날씨예보\n\n `<tmn>`: 최저온도\n\n `<tmx>`: 최고온도\n\n `<rnSt>`: 강수확률\n\n Return\n -------\n\n weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.\n\n '
import requests
from bs4 import BeautifulSoup
weather_result = requests.get(url)
weather_soup = BeautifulSoup(weather_result.text, 'html.parser')
weather_data = []
for location_data in weather_soup.findAll('location', {'wl_ver': 3}):
city = location_data.find('city')
for data in location_data.findAll('data'):
dictionary = {}
dictionary['city'] = city.string
dictionary['mode'] = data.find('mode').string
dictionary['tmef'] = data.find('tmef').string
dictionary['wf'] = data.find('wf').string
dictionary['tmn'] = data.find('tmn').string
dictionary['tmx'] = data.find('tmx').string
dictionary['rnst'] = data.find('rnst').string
weather_data.append(dictionary)
return weather_data<|docstring|>Explanation
----
분석할 기상청 주소: https://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp?sido=2800000000&gugun=2818500000&dong=2818582000&x=21&y=3
참고한 사이트: https://kocoafab.cc/tutorial/view/595
중기예보를 크롤링하여 값을 가져오는 함수입니다.
동네예보와는 양식이 틀리오니 적절하게 사용하시길 바랍니다.
Tags
-----
`<mode>`: 예보날짜
- A01: 전일 예보
- A02: 오전, 오후 구분 예보
`<tmEf>`: 시간(yyyy-mm-dd 00:00)
`<wf>`: 날씨예보
`<tmn>`: 최저온도
`<tmx>`: 최고온도
`<rnSt>`: 강수확률
Return
-------
weather_data (list-dict): 리스트내부의 리스트 마다 각 날짜별 데이터들을 딕셔너리 형태로 가지고있습니다.<|endoftext|>
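An offline sketch of the mid-term parsing above, run against an inline XML fragment instead of the live RSS feed; html.parser lower-cases tag names, which is why the record queries 'tmef'/'rnst' rather than 'tmEf'/'rnSt' (the attribute filter is given as a string here).
from bs4 import BeautifulSoup

sample = '''
<location wl_ver="3">
  <city>인천</city>
  <data><mode>A02</mode><tmEf>2024-01-01 00:00</tmEf><wf>맑음</wf>
        <tmn>-3</tmn><tmx>4</tmx><rnSt>20</rnSt></data>
</location>
'''
soup = BeautifulSoup(sample, 'html.parser')
for loc in soup.find_all('location', {'wl_ver': '3'}):
    city = loc.find('city').string
    for data in loc.find_all('data'):
        print(city, data.find('tmef').string, data.find('wf').string, data.find('rnst').string)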
|
d6974c47d8b6c5ef3b78a37aa6582a11427e7b820777cde6f5134fca7c6920ab
|
@pytest.fixture(scope='class')
def in_fake_venv():
'Move test to execute inside a mocked, empty virtual environment.'
dirpath = tempfile.mkdtemp(dir=utils.TEMPDIR_ROOT)
os.makedirs(os.path.join(dirpath, constants.LIB_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB32_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB64_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.BIN_PYCACHE))
open(os.path.join(dirpath, 'bin', constants.PYTHON_VERSION_SEGMENT), 'wb').close()
try:
with utils.chdir(dirpath):
(yield dirpath)
finally:
shutil.rmtree(dirpath)
|
Move test to execute inside a mocked, empty virtual environment.
|
client/verta/tests/custom_modules/conftest.py
|
in_fake_venv
|
mitdbg/modeldb
| 835 |
python
|
@pytest.fixture(scope='class')
def in_fake_venv():
dirpath = tempfile.mkdtemp(dir=utils.TEMPDIR_ROOT)
os.makedirs(os.path.join(dirpath, constants.LIB_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB32_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB64_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.BIN_PYCACHE))
open(os.path.join(dirpath, 'bin', constants.PYTHON_VERSION_SEGMENT), 'wb').close()
try:
with utils.chdir(dirpath):
(yield dirpath)
finally:
shutil.rmtree(dirpath)
|
@pytest.fixture(scope='class')
def in_fake_venv():
dirpath = tempfile.mkdtemp(dir=utils.TEMPDIR_ROOT)
os.makedirs(os.path.join(dirpath, constants.LIB_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB32_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.LIB64_SITE_PACKAGES))
os.makedirs(os.path.join(dirpath, constants.BIN_PYCACHE))
open(os.path.join(dirpath, 'bin', constants.PYTHON_VERSION_SEGMENT), 'wb').close()
try:
with utils.chdir(dirpath):
(yield dirpath)
finally:
shutil.rmtree(dirpath)<|docstring|>Move test to execute inside a mocked, empty virtual environment.<|endoftext|>
|
99924aafc8f7bf207de10e131fa7e2b633351af69601da5f5ba06315cc535bb8
|
def get_normalize_layer(dataset: str) -> torch.nn.Module:
"Return the dataset's normalization layer"
if (dataset == 'imagenet'):
return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
elif (dataset == 'cifar10'):
return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
|
Return the dataset's normalization layer
|
RSCP/utils.py
|
get_normalize_layer
|
Asafgendler/RSCP
| 0 |
python
|
def get_normalize_layer(dataset: str) -> torch.nn.Module:
if (dataset == 'imagenet'):
return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
elif (dataset == 'cifar10'):
return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)
|
def get_normalize_layer(dataset: str) -> torch.nn.Module:
if (dataset == 'imagenet'):
return NormalizeLayer(_IMAGENET_MEAN, _IMAGENET_STDDEV)
elif (dataset == 'cifar10'):
return NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV)<|docstring|>Return the dataset's normalization layer<|endoftext|>
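A hedged, self-contained re-implementation of the per-channel normalization the record above wires up, composed in front of a toy model; the CIFAR-10 mean/std values are the commonly quoted ones and may differ from the repository's constants.
import torch
import torch.nn as nn

class NormalizeLayer(nn.Module):
    """(x - mean) / std applied per channel of an NCHW batch."""

    def __init__(self, means, sds):
        super().__init__()
        self.register_buffer('means', torch.tensor(means).view(1, -1, 1, 1))
        self.register_buffer('sds', torch.tensor(sds).view(1, -1, 1, 1))

    def forward(self, x):
        return (x - self.means) / self.sds

_CIFAR10_MEAN = [0.4914, 0.4822, 0.4465]
_CIFAR10_STDDEV = [0.2470, 0.2435, 0.2616]

model = nn.Sequential(NormalizeLayer(_CIFAR10_MEAN, _CIFAR10_STDDEV), nn.Conv2d(3, 8, 3))
print(model(torch.rand(2, 3, 32, 32)).shape)  # torch.Size([2, 8, 30, 30])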
|
bfca63fd608a64adbd073ea8d475701bf685e37858ac22f0e39d21cfd1c1aa26
|
def __init__(self, means: List[float], sds: List[float]):
'\n :param means: the channel means\n :param sds: the channel standard deviations\n '
super(NormalizeLayer, self).__init__()
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
self.means = torch.tensor(means).to(device)
self.sds = torch.tensor(sds).to(device)
|
:param means: the channel means
:param sds: the channel standard deviations
|
RSCP/utils.py
|
__init__
|
Asafgendler/RSCP
| 0 |
python
|
def __init__(self, means: List[float], sds: List[float]):
'\n :param means: the channel means\n :param sds: the channel standard deviations\n '
super(NormalizeLayer, self).__init__()
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
self.means = torch.tensor(means).to(device)
self.sds = torch.tensor(sds).to(device)
|
def __init__(self, means: List[float], sds: List[float]):
'\n :param means: the channel means\n :param sds: the channel standard deviations\n '
super(NormalizeLayer, self).__init__()
device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
self.means = torch.tensor(means).to(device)
self.sds = torch.tensor(sds).to(device)<|docstring|>:param means: the channel means
:param sds: the channel standard deviations<|endoftext|>
|
e9de3aa8fe7b93427da1e5a606fbbc090b60b5ad5adb563693319b5b7e6ae4d9
|
def __init__(self, loc):
'\n :param loc: List of cards\n :type loc: [TraitCards, ...]\n :return: None\n '
self.loc = loc
|
:param loc: List of cards
:type loc: [TraitCards, ...]
:return: None
|
benchmarks/Evolution/both/dealer/deck.py
|
__init__
|
nuprl/retic_performance
| 3 |
python
|
def __init__(self, loc):
'\n :param loc: List of cards\n :type loc: [TraitCards, ...]\n :return: None\n '
self.loc = loc
|
def __init__(self, loc):
'\n :param loc: List of cards\n :type loc: [TraitCards, ...]\n :return: None\n '
self.loc = loc<|docstring|>:param loc: List of cards
:type loc: [TraitCards, ...]
:return: None<|endoftext|>
|
a993e96985cadf2e443d05d2f2ff775c1968832fa36af82babfe1db233c751f4
|
def get_n_cards(self, n):
'\n Give the player n cards, if there are enough\n cards\n :param n: number of cards\n :type n: Nat\n :return: n cards\n :rtype: [TraitCard, ...]\n '
loc = []
for i in range(n):
if self.has_next():
loc.append(self.get_next())
return loc
|
Give the player n cards, if there are enough
cards
:param n: number of cards
:type n: Nat
:return: n cards
:rtype: [TraitCard, ...]
|
benchmarks/Evolution/both/dealer/deck.py
|
get_n_cards
|
nuprl/retic_performance
| 3 |
python
|
def get_n_cards(self, n):
'\n Give the player n cards, if there are enough\n cards\n :param n: number of cards\n :type n: Nat\n :return: n cards\n :rtype: [TraitCard, ...]\n '
loc = []
for i in range(n):
if self.has_next():
loc.append(self.get_next())
return loc
|
def get_n_cards(self, n):
'\n Give the player n cards, if there are enough\n cards\n :param n: number of cards\n :type n: Nat\n :return: n cards\n :rtype: [TraitCard, ...]\n '
loc = []
for i in range(n):
if self.has_next():
loc.append(self.get_next())
return loc<|docstring|>Give the player n cards, if there are enough
cards
:param n: number of cards
:type n: Nat
:return: n cards
:rtype: [TraitCard, ...]<|endoftext|>
|
2978580d58dd651a8faffd3382c9e1e4c7a000d4c8b600fd8193db4eae4a205c
|
def get_next(self):
'\n Gets the next card in the deck\n :return: the next card\n :rtype: TraitCard\n '
return self.loc.pop(0)
|
Gets the next card in the deck
:return: the next card
:rtype: TraitCard
|
benchmarks/Evolution/both/dealer/deck.py
|
get_next
|
nuprl/retic_performance
| 3 |
python
|
def get_next(self):
'\n Gets the next card in the deck\n :return: the next card\n :rtype: TraitCard\n '
return self.loc.pop(0)
|
def get_next(self):
'\n Gets the next card in the deck\n :return: the next card\n :rtype: TraitCard\n '
return self.loc.pop(0)<|docstring|>Gets the next card in the deck
:return: the next card
:rtype: TraitCard<|endoftext|>
|
3a8c344cd1b7177c4e4f627670b92d59759a7f15a7d98a771e77393aeb32f0df
|
def has_next(self):
'\n Does this deck have anymore cards?\n :return: Returns true if there are more cards in the deck and false otherwise\n :rtype: Boolean\n '
return (len(self.loc) > 0)
|
Does this deck have anymore cards?
:return: Returns true if there are more cards in the deck and false otherwise
:rtype: Boolean
|
benchmarks/Evolution/both/dealer/deck.py
|
has_next
|
nuprl/retic_performance
| 3 |
python
|
def has_next(self):
'\n Does this deck have anymore cards?\n :return: Returns true if there are more cards in the deck and false otherwise\n :rtype: Boolean\n '
return (len(self.loc) > 0)
|
def has_next(self):
'\n Does this deck have anymore cards?\n :return: Returns true if there are more cards in the deck and false otherwise\n :rtype: Boolean\n '
return (len(self.loc) > 0)<|docstring|>Does this deck have anymore cards?
:return: Returns true if there are more cards in the deck and false otherwise
:rtype: Boolean<|endoftext|>
|
0bdd1603a4db120e8438bd9043dcd7a1f5f8a328496090ee67cc2ccbcc40335e
|
@staticmethod
def make_deck():
'\n Creates a deck of cards\n :return: a deck of cards\n :rtype: Deck\n '
deck_list = []
for key in sorted(trait_dictionary):
deck_list += trait_dictionary[key].make_list_of_cards()
return Deck(deck_list)
|
Creates a deck of cards
:return: a deck of cards
:rtype: Deck
|
benchmarks/Evolution/both/dealer/deck.py
|
make_deck
|
nuprl/retic_performance
| 3 |
python
|
@staticmethod
def make_deck():
'\n Creates a deck of cards\n :return: a deck of cards\n :rtype: Deck\n '
deck_list = []
for key in sorted(trait_dictionary):
deck_list += trait_dictionary[key].make_list_of_cards()
return Deck(deck_list)
|
@staticmethod
def make_deck():
'\n Creates a deck of cards\n :return: a deck of cards\n :rtype: Deck\n '
deck_list = []
for key in sorted(trait_dictionary):
deck_list += trait_dictionary[key].make_list_of_cards()
return Deck(deck_list)<|docstring|>Creates a deck of cards
:return: a deck of cards
:rtype: Deck<|endoftext|>
|
51b457954bad84d6fa39b341751428404b1eea0d694a6c6adc4abaddc20dcd99
|
@staticmethod
def _download_blob(target, source_url):
'Download data into a target from a source blob URL. We symlink\n local files.\n\n Args:\n target (`Luigi.Target`): A Luigi Target object\n source_url (str): Source data URL, accepts file:// and s3://\n\n Returns:\n None\n '
url = urlparse(source_url)
if (url.scheme.lower() == 'file'):
_logger.info('Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)
elif (url.scheme.lower() == 's3'):
_logger.info('Downloading to {} from {}'.format(target.path, url.geturl()))
s3.get_s3_file(url.geturl(), target.path)
else:
_logger.info('Assuming file: Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)
|
Download data into a target from a source blob URL. We symlink
local files.
Args:
target (`Luigi.Target`): A Luigi Target object
source_url (str): Source data URL, accepts file:// and s3://
Returns:
None
|
examples/pipelines/download.py
|
_download_blob
|
akashshah59/disdat
| 1 |
python
|
@staticmethod
def _download_blob(target, source_url):
'Download data into a target from a source blob URL. We symlink\n local files.\n\n Args:\n target (`Luigi.Target`): A Luigi Target object\n source_url (str): Source data URL, accepts file:// and s3://\n\n Returns:\n None\n '
url = urlparse(source_url)
if (url.scheme.lower() == 'file'):
_logger.info('Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)
elif (url.scheme.lower() == 's3'):
_logger.info('Downloading to {} from {}'.format(target.path, url.geturl()))
s3.get_s3_file(url.geturl(), target.path)
else:
_logger.info('Assuming file: Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)
|
@staticmethod
def _download_blob(target, source_url):
'Download data into a target from a source blob URL. We symlink\n local files.\n\n Args:\n target (`Luigi.Target`): A Luigi Target object\n source_url (str): Source data URL, accepts file:// and s3://\n\n Returns:\n None\n '
url = urlparse(source_url)
if (url.scheme.lower() == 'file'):
_logger.info('Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)
elif (url.scheme.lower() == 's3'):
_logger.info('Downloading to {} from {}'.format(target.path, url.geturl()))
s3.get_s3_file(url.geturl(), target.path)
else:
_logger.info('Assuming file: Copying {} from file {}'.format(target.path, url.path))
if (not os.path.exists(url.path)):
raise RuntimeError('Unable to find source file {}'.format(url.path))
shutil.copyfile(url.path, target.path)<|docstring|>Download data into a target from a source blob URL. We symlink
local files.
Args:
target (`Luigi.Target`): A Luigi Target object
source_url (str): Source data URL, accepts file:// and s3://
Returns:
None<|endoftext|>
|
910329d1f543723424638b4ba9a44934cc0cbd2651a22a820348eac58d0c0c13
|
def pipe_run(self):
'Download data from a source blob URL.\n\n Args:\n pipeline_input (`pandas.DataFrame`): A single-row, single-column dataframe with a remote URL\n '
source_url = self._validate_and_get_input_url()
target = self.create_output_file(os.path.basename(source_url))
Download._download_blob(target, source_url)
return {self.OUTPUT_FILE_KEY: [target.path]}
|
Download data from a source blob URL.
Args:
pipeline_input (`pandas.DataFrame`): A single-row, single-column dataframe with a remote URL
|
examples/pipelines/download.py
|
pipe_run
|
akashshah59/disdat
| 1 |
python
|
def pipe_run(self):
'Download data from a source blob URL.\n\n Args:\n pipeline_input (`pandas.DataFrame`): A single-row, single-column dataframe with a remote URL\n '
source_url = self._validate_and_get_input_url()
target = self.create_output_file(os.path.basename(source_url))
Download._download_blob(target, source_url)
return {self.OUTPUT_FILE_KEY: [target.path]}
|
def pipe_run(self):
'Download data from a source blob URL.\n\n Args:\n pipeline_input (`pandas.DataFrame`): A single-row, single-column dataframe with a remote URL\n '
source_url = self._validate_and_get_input_url()
target = self.create_output_file(os.path.basename(source_url))
Download._download_blob(target, source_url)
return {self.OUTPUT_FILE_KEY: [target.path]}<|docstring|>Download data from a source blob URL.
Args:
pipeline_input (`pandas.DataFrame`): A single-row, single-column dataframe with a remote URL<|endoftext|>
|
f78f4b8bb57cc7cbe5166ed23ba932e1ee2a86df7f2aa7867b3008f508ed30b6
|
@blueprint.get('/global')
@permission_required('admin.access')
@templated
def view_global():
'Show more global admin items.'
return {}
|
Show more global admin items.
|
byceps/blueprints/admin/more/views.py
|
view_global
|
GyBraLAN/byceps
| 33 |
python
|
@blueprint.get('/global')
@permission_required('admin.access')
@templated
def view_global():
return {}
|
@blueprint.get('/global')
@permission_required('admin.access')
@templated
def view_global():
return {}<|docstring|>Show more global admin items.<|endoftext|>
|
bade26336c027815a5ad667afa0d945287244e7f07e43e49470d7c068d4eb0f2
|
@blueprint.get('/brands/<brand_id>')
@permission_required('admin.access')
@templated
def view_brand(brand_id):
'Show more brand admin items.'
brand = brand_service.find_brand(brand_id)
if (brand is None):
abort(404)
return {'brand': brand}
|
Show more brand admin items.
|
byceps/blueprints/admin/more/views.py
|
view_brand
|
GyBraLAN/byceps
| 33 |
python
|
@blueprint.get('/brands/<brand_id>')
@permission_required('admin.access')
@templated
def view_brand(brand_id):
brand = brand_service.find_brand(brand_id)
if (brand is None):
abort(404)
return {'brand': brand}
|
@blueprint.get('/brands/<brand_id>')
@permission_required('admin.access')
@templated
def view_brand(brand_id):
brand = brand_service.find_brand(brand_id)
if (brand is None):
abort(404)
return {'brand': brand}<|docstring|>Show more brand admin items.<|endoftext|>
|
8a3d41ce64adeb7fc646594b079075a7a38026cf325175993442980dde9b39a1
|
@blueprint.get('/parties/<party_id>')
@permission_required('admin.access')
@templated
def view_party(party_id):
'Show more party admin items.'
party = party_service.find_party(party_id)
if (party is None):
abort(404)
return {'party': party}
|
Show more party admin items.
|
byceps/blueprints/admin/more/views.py
|
view_party
|
GyBraLAN/byceps
| 33 |
python
|
@blueprint.get('/parties/<party_id>')
@permission_required('admin.access')
@templated
def view_party(party_id):
party = party_service.find_party(party_id)
if (party is None):
abort(404)
return {'party': party}
|
@blueprint.get('/parties/<party_id>')
@permission_required('admin.access')
@templated
def view_party(party_id):
party = party_service.find_party(party_id)
if (party is None):
abort(404)
return {'party': party}<|docstring|>Show more party admin items.<|endoftext|>
|
9dc3f30f5e9919742c09e72b69ed8facb9fca6bbdaa3ca40abed9a43761b66f0
|
@blueprint.get('/sites/<site_id>')
@permission_required('admin.access')
@templated
def view_site(site_id):
'Show more site admin items.'
site = site_service.find_site(site_id)
if (site is None):
abort(404)
return {'site': site}
|
Show more site admin items.
|
byceps/blueprints/admin/more/views.py
|
view_site
|
GyBraLAN/byceps
| 33 |
python
|
@blueprint.get('/sites/<site_id>')
@permission_required('admin.access')
@templated
def view_site(site_id):
site = site_service.find_site(site_id)
if (site is None):
abort(404)
return {'site': site}
|
@blueprint.get('/sites/<site_id>')
@permission_required('admin.access')
@templated
def view_site(site_id):
site = site_service.find_site(site_id)
if (site is None):
abort(404)
return {'site': site}<|docstring|>Show more site admin items.<|endoftext|>
|
cdd68b2995fbd48ad3f1afdbbc43639bdd83c1fbfc6128ad77e4d50d926e6c0a
|
def create(self, validated_data):
'\n Create\n\n '
password = self.context['request'].data.get('password')
if (not password):
raise serializers.ValidationError({'password': [_('This field is required.')]})
password_errors = validate_password(password)
if password_errors:
raise serializers.ValidationError({'password': password_errors})
account = models.Account.objects.create_user(password=password, **validated_data)
account.save()
auth = authenticate(username=account.username, password=password)
login(self.context['request'], auth)
return account
|
Create
|
accounts/serializers.py
|
create
|
ebar0n/palermo-coin
| 0 |
python
|
def create(self, validated_data):
'\n \n\n '
password = self.context['request'].data.get('password')
if (not password):
raise serializers.ValidationError({'password': [_('This field is required.')]})
password_errors = validate_password(password)
if password_errors:
raise serializers.ValidationError({'password': password_errors})
account = models.Account.objects.create_user(password=password, **validated_data)
account.save()
auth = authenticate(username=account.username, password=password)
login(self.context['request'], auth)
return account
|
def create(self, validated_data):
'\n \n\n '
password = self.context['request'].data.get('password')
if (not password):
raise serializers.ValidationError({'password': [_('This field is required.')]})
password_errors = validate_password(password)
if password_errors:
raise serializers.ValidationError({'password': password_errors})
account = models.Account.objects.create_user(password=password, **validated_data)
account.save()
auth = authenticate(username=account.username, password=password)
login(self.context['request'], auth)
return account<|docstring|>Create<|endoftext|>
|
0fcf63751996565985a1f8f1d2ddeacbf048bc26a4e4f209c2eb19622ef6aca5
|
def _create_netcdf(self, file_name, clobber):
'\n Create new (empty) netCDF for model state and parameters\n \n Arguments:\n file_name: String, path to which file should be saved \n clobber: Boolean, enable/disable overwriting output file\n \n Model components are responsible for initializing thier own output\n variables, using the expected .init_netcdf method.\n '
self._logger.info('Creating input file: {}'.format(file_name))
zlib = False
complevel = 1
shuffle = True
chunksizes = (1, self._y.size, self._x.size)
nc = netCDF4.Dataset(file_name, 'w', format='NETCDF4', clobber=clobber)
nc.version = py_ice_cascade_version
nc.time_start = self._time_start
nc.time_step = self._time_step
nc.num_steps = self._num_steps
nc.out_steps = 'see step variable'
nc.createDimension('x', size=self._x.size)
nc.createDimension('y', size=self._y.size)
nc.createDimension('time', size=self._out_steps.size)
nc.createVariable('x', np.double, dimensions='x')
nc['x'].long_name = 'x coordinate'
nc['x'].units = 'm'
nc['x'][:] = self._x
nc.createVariable('y', np.double, dimensions='y')
nc['y'].long_name = 'y coordinate'
nc['y'].units = 'm'
nc['y'][:] = self._y
nc.createVariable('time', np.double, dimensions='time')
nc['time'].long_name = 'time coordinate'
nc['time'].units = 'a'
nc['time'].start = self._time_start
nc['time'].step = self._time_step
nc.createVariable('step', np.int64, dimensions='time')
nc['step'].long_name = 'model time step'
nc['step'].units = '1'
nc['step'].num_steps = self._num_steps
nc['step'].out_steps = self._out_steps
nc.createVariable('z_rx', np.double, dimensions=('time', 'y', 'x'), zlib=zlib, complevel=complevel, shuffle=shuffle, chunksizes=chunksizes)
nc['z_rx'].long_name = 'bedrock surface elevation'
nc['z_rx'].units = 'm'
self._model_hill.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
self._model_uplift.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
nc.close()
|
Create new (empty) netCDF for model state and parameters
Arguments:
file_name: String, path to which file should be saved
clobber: Boolean, enable/disable overwriting output file
Model components are responsible for initializing thier own output
variables, using the expected .init_netcdf method.
|
py_ice_cascade/main.py
|
_create_netcdf
|
keithfma/py_ice_cascade
| 0 |
python
|
def _create_netcdf(self, file_name, clobber):
'\n Create new (empty) netCDF for model state and parameters\n \n Arguments:\n file_name: String, path to which file should be saved \n clobber: Boolean, enable/disable overwriting output file\n \n Model components are responsible for initializing thier own output\n variables, using the expected .init_netcdf method.\n '
self._logger.info('Creating input file: {}'.format(file_name))
zlib = False
complevel = 1
shuffle = True
chunksizes = (1, self._y.size, self._x.size)
nc = netCDF4.Dataset(file_name, 'w', format='NETCDF4', clobber=clobber)
nc.version = py_ice_cascade_version
nc.time_start = self._time_start
nc.time_step = self._time_step
nc.num_steps = self._num_steps
nc.out_steps = 'see step variable'
nc.createDimension('x', size=self._x.size)
nc.createDimension('y', size=self._y.size)
nc.createDimension('time', size=self._out_steps.size)
nc.createVariable('x', np.double, dimensions='x')
nc['x'].long_name = 'x coordinate'
nc['x'].units = 'm'
nc['x'][:] = self._x
nc.createVariable('y', np.double, dimensions='y')
nc['y'].long_name = 'y coordinate'
nc['y'].units = 'm'
nc['y'][:] = self._y
nc.createVariable('time', np.double, dimensions='time')
nc['time'].long_name = 'time coordinate'
nc['time'].units = 'a'
nc['time'].start = self._time_start
nc['time'].step = self._time_step
nc.createVariable('step', np.int64, dimensions='time')
nc['step'].long_name = 'model time step'
nc['step'].units = '1'
nc['step'].num_steps = self._num_steps
nc['step'].out_steps = self._out_steps
nc.createVariable('z_rx', np.double, dimensions=('time', 'y', 'x'), zlib=zlib, complevel=complevel, shuffle=shuffle, chunksizes=chunksizes)
nc['z_rx'].long_name = 'bedrock surface elevation'
nc['z_rx'].units = 'm'
self._model_hill.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
self._model_uplift.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
nc.close()
|
def _create_netcdf(self, file_name, clobber):
'\n Create new (empty) netCDF for model state and parameters\n \n Arguments:\n file_name: String, path to which file should be saved \n clobber: Boolean, enable/disable overwriting output file\n \n Model components are responsible for initializing thier own output\n variables, using the expected .init_netcdf method.\n '
self._logger.info('Creating input file: {}'.format(file_name))
zlib = False
complevel = 1
shuffle = True
chunksizes = (1, self._y.size, self._x.size)
nc = netCDF4.Dataset(file_name, 'w', format='NETCDF4', clobber=clobber)
nc.version = py_ice_cascade_version
nc.time_start = self._time_start
nc.time_step = self._time_step
nc.num_steps = self._num_steps
nc.out_steps = 'see step variable'
nc.createDimension('x', size=self._x.size)
nc.createDimension('y', size=self._y.size)
nc.createDimension('time', size=self._out_steps.size)
nc.createVariable('x', np.double, dimensions='x')
nc['x'].long_name = 'x coordinate'
nc['x'].units = 'm'
nc['x'][:] = self._x
nc.createVariable('y', np.double, dimensions='y')
nc['y'].long_name = 'y coordinate'
nc['y'].units = 'm'
nc['y'][:] = self._y
nc.createVariable('time', np.double, dimensions='time')
nc['time'].long_name = 'time coordinate'
nc['time'].units = 'a'
nc['time'].start = self._time_start
nc['time'].step = self._time_step
nc.createVariable('step', np.int64, dimensions='time')
nc['step'].long_name = 'model time step'
nc['step'].units = '1'
nc['step'].num_steps = self._num_steps
nc['step'].out_steps = self._out_steps
nc.createVariable('z_rx', np.double, dimensions=('time', 'y', 'x'), zlib=zlib, complevel=complevel, shuffle=shuffle, chunksizes=chunksizes)
nc['z_rx'].long_name = 'bedrock surface elevation'
nc['z_rx'].units = 'm'
self._model_hill.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
self._model_uplift.init_netcdf(nc, zlib, complevel, shuffle, chunksizes)
nc.close()<|docstring|>Create new (empty) netCDF for model state and parameters
Arguments:
file_name: String, path to which file should be saved
clobber: Boolean, enable/disable overwriting output file
Model components are responsible for initializing thier own output
variables, using the expected .init_netcdf method.<|endoftext|>
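A minimal stand-alone netCDF4 sketch of the same createDimension/createVariable pattern used in the record above; the file name 'tiny.nc' and the coordinate values are illustrative, not taken from the dataset:

import numpy as np
import netCDF4

# open a fresh NETCDF4 file, overwriting any previous copy
nc = netCDF4.Dataset('tiny.nc', 'w', format='NETCDF4', clobber=True)
nc.createDimension('x', size=4)
nc.createVariable('x', np.double, dimensions='x')
nc['x'].long_name = 'x coordinate'
nc['x'].units = 'm'
nc['x'][:] = np.linspace(0.0, 3.0, 4)
nc.close()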
|
cef0b414e8dd73b649500dc3b538355dec79e470f1f3ad62d8e92bdefad99eba
|
def _to_netcdf(self, file_name):
'\n Append model state and parameters to netCDF file\n \n Arguments:\n file_name: String, path to which file should be saved \n '
if (self._step in self._out_steps):
self._logger.info('Write output for time={:.2f}, step={}'.format(self._time, self._step))
ii = list(self._out_steps).index(self._step)
nc = netCDF4.Dataset(file_name, 'a')
nc['time'][ii] = self._time
nc['step'][ii] = self._step
nc['z_rx'][(ii, :, :)] = self._z_rx
self._model_hill.to_netcdf(nc, ii)
self._model_uplift.to_netcdf(nc, ii)
nc.close()
|
Append model state and parameters to netCDF file
Arguments:
file_name: String, path to which file should be saved
|
py_ice_cascade/main.py
|
_to_netcdf
|
keithfma/py_ice_cascade
| 0 |
python
|
def _to_netcdf(self, file_name):
'\n Append model state and parameters to netCDF file\n \n Arguments:\n file_name: String, path to which file should be saved \n '
if (self._step in self._out_steps):
self._logger.info('Write output for time={:.2f}, step={}'.format(self._time, self._step))
ii = list(self._out_steps).index(self._step)
nc = netCDF4.Dataset(file_name, 'a')
nc['time'][ii] = self._time
nc['step'][ii] = self._step
nc['z_rx'][(ii, :, :)] = self._z_rx
self._model_hill.to_netcdf(nc, ii)
self._model_uplift.to_netcdf(nc, ii)
nc.close()
|
def _to_netcdf(self, file_name):
'\n Append model state and parameters to netCDF file\n \n Arguments:\n file_name: String, path to which file should be saved \n '
if (self._step in self._out_steps):
self._logger.info('Write output for time={:.2f}, step={}'.format(self._time, self._step))
ii = list(self._out_steps).index(self._step)
nc = netCDF4.Dataset(file_name, 'a')
nc['time'][ii] = self._time
nc['step'][ii] = self._step
nc['z_rx'][(ii, :, :)] = self._z_rx
self._model_hill.to_netcdf(nc, ii)
self._model_uplift.to_netcdf(nc, ii)
nc.close()<|docstring|>Append model state and parameters to netCDF file
Arguments:
file_name: String, path to which file should be saved<|endoftext|>
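A tiny framework-free illustration of the "write only on output steps" bookkeeping used in _to_netcdf above; the out_steps values here are an assumed toy example:

out_steps = [0, 5, 10]
for step in range(11):
    if step in out_steps:
        ii = list(out_steps).index(step)   # slot along the netCDF time dimension
        print(f'step {step} -> output slot {ii}')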
|
358cb955beaf64f9518af7fc58d0f8920a01043383b6e95b15b2fdbca7e38607
|
def run(self, file_name, clobber=False):
'\n Run model simulation, save results to file\n\n Arguments:\n file_name: String, path to which results should be saved \n clobber: Boolean, allow overwriting output file\n '
self._logger.info('Initialize simulation')
self._delta = np.abs((self._x[1] - self._x[0]))
self._time_end = (self._time_start + (self._time_step * (self._num_steps - 1)))
self._time = self._time_start
self._step = 0
self._create_netcdf(file_name, clobber)
self._to_netcdf(file_name)
while (self._step < self._num_steps):
self._model_hill.set_height(self._z_rx)
self._model_uplift.set_height(self._z_rx)
self._model_hill.run(self._time_step)
self._model_uplift.run(self._time, (self._time + self._time_step))
dzdt = ((self._model_hill.get_height() + self._model_uplift.get_height()) - (2 * self._z_rx))
self._z_rx += dzdt
self._time += self._time_step
self._step += 1
self._to_netcdf(file_name)
self._logger.info('Simulation complete')
|
Run model simulation, save results to file
Arguments:
file_name: String, path to which results should be saved
clobber: Boolean, allow overwriting output file
|
py_ice_cascade/main.py
|
run
|
keithfma/py_ice_cascade
| 0 |
python
|
def run(self, file_name, clobber=False):
'\n Run model simulation, save results to file\n\n Arguments:\n file_name: String, path to which results should be saved \n clobber: Boolean, allow overwriting output file\n '
self._logger.info('Initialize simulation')
self._delta = np.abs((self._x[1] - self._x[0]))
self._time_end = (self._time_start + (self._time_step * (self._num_steps - 1)))
self._time = self._time_start
self._step = 0
self._create_netcdf(file_name, clobber)
self._to_netcdf(file_name)
while (self._step < self._num_steps):
self._model_hill.set_height(self._z_rx)
self._model_uplift.set_height(self._z_rx)
self._model_hill.run(self._time_step)
self._model_uplift.run(self._time, (self._time + self._time_step))
dzdt = ((self._model_hill.get_height() + self._model_uplift.get_height()) - (2 * self._z_rx))
self._z_rx += dzdt
self._time += self._time_step
self._step += 1
self._to_netcdf(file_name)
self._logger.info('Simulation complete')
|
def run(self, file_name, clobber=False):
'\n Run model simulation, save results to file\n\n Arguments:\n file_name: String, path to which results should be saved \n clobber: Boolean, allow overwriting output file\n '
self._logger.info('Initialize simulation')
self._delta = np.abs((self._x[1] - self._x[0]))
self._time_end = (self._time_start + (self._time_step * (self._num_steps - 1)))
self._time = self._time_start
self._step = 0
self._create_netcdf(file_name, clobber)
self._to_netcdf(file_name)
while (self._step < self._num_steps):
self._model_hill.set_height(self._z_rx)
self._model_uplift.set_height(self._z_rx)
self._model_hill.run(self._time_step)
self._model_uplift.run(self._time, (self._time + self._time_step))
dzdt = ((self._model_hill.get_height() + self._model_uplift.get_height()) - (2 * self._z_rx))
self._z_rx += dzdt
self._time += self._time_step
self._step += 1
self._to_netcdf(file_name)
self._logger.info('Simulation complete')<|docstring|>Run model simulation, save results to file
Arguments:
file_name: String, path to which results should be saved
clobber: Boolean, allow overwriting output file<|endoftext|>
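A hedged numpy sketch of the per-step height update inside run() above: each component returns its own updated surface from the shared initial height, and the two increments are summed before advancing. The grid shape and the 0.1/0.5 increments are assumptions for illustration only:

import numpy as np

z = np.zeros((3, 3))          # shared bedrock surface
z_hill = z + 0.1              # stand-in for model_hill.get_height()
z_uplift = z + 0.5            # stand-in for model_uplift.get_height()
dzdt = (z_hill + z_uplift) - 2 * z
z += dzdt
print(z[0, 0])                # 0.6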
|
4f9e31e2580e880c90f9e847edf0a021921fc70a8eca6a7321cd968b965fa0d3
|
def __init__(self, host=None, port=None, certificate=None, base_path='/mn', private_key=None, verbosity='INFO', id='none', num_documents=(- 1), num_workers=1, max_num_errors=3, regex=None, retry=0, ignore_harvest_time=False, no_harvest=False, logger=None, sitemap_url=None):
"\n Parameters\n ----------\n base_path : str\n The default installation instructions for GMN imply that this\n should be '/mn' but the ARM install uses '/arm', so we have to\n make this optional.\n host, port : str, int\n This fully identifies the DataONE host and port number where we\n will be sending metadata records.\n certificate, private_key : str or path or None\n Paths to client side certificates. None if no verification is\n desired.\n logger : logging.Logger\n Use this logger instead of the default.\n max_num_errors : int\n Abort if this threshold is reached.\n num_documents : int\n Limit the number of documents to this number. Less than zero\n means retrieve them all.\n "
self.mn_host = host
self.setup_session(certificate, private_key)
self.setup_logging(id, verbosity, logger=logger)
self.mn_base_url = f'https://{host}:{port}{base_path}'
self.sys_meta_dict = {'submitter': 'TBD', 'rightsholder': 'TBD', 'authoritativeMN': 'TBD', 'originMN': 'TBD', 'formatId_custom': 'http://www.isotc211.org/2005/gmd'}
self._certificate = certificate
self._private_key = private_key
self.client_mgr = D1ClientManager(self.mn_base_url, certificate, private_key, self.logger)
self.job_records = []
self.failed_count = 0
self.updated_count = 0
self.created_count = 0
self.retry = retry
self.num_documents = num_documents
self.num_records_processed = 0
self.num_workers = num_workers
self.max_num_errors = max_num_errors
self.regex = (re.compile(regex) if (regex is not None) else None)
self.ignore_harvest_time = ignore_harvest_time
self.no_harvest = no_harvest
self.sitemap_url = sitemap_url
self._sitemaps = []
self._sitemap_records = []
requests.packages.urllib3.disable_warnings()
|
Parameters
----------
base_path : str
The default installation instructions for GMN imply that this
should be '/mn' but the ARM install uses '/arm', so we have to
make this optional.
host, port : str, int
This fully identifies the DataONE host and port number where we
will be sending metadata records.
certificate, private_key : str or path or None
Paths to client side certificates. None if no verification is
desired.
logger : logging.Logger
Use this logger instead of the default.
max_num_errors : int
Abort if this threshold is reached.
num_documents : int
Limit the number of documents to this number. Less than zero
means retrieve them all.
|
schema_org/schema_org/core.py
|
__init__
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def __init__(self, host=None, port=None, certificate=None, base_path='/mn', private_key=None, verbosity='INFO', id='none', num_documents=(- 1), num_workers=1, max_num_errors=3, regex=None, retry=0, ignore_harvest_time=False, no_harvest=False, logger=None, sitemap_url=None):
"\n Parameters\n ----------\n base_path : str\n The default installation instructions for GMN imply that this\n should be '/mn' but the ARM install uses '/arm', so we have to\n make this optional.\n host, port : str, int\n This fully identifies the DataONE host and port number where we\n will be sending metadata records.\n certificate, private_key : str or path or None\n Paths to client side certificates. None if no verification is\n desired.\n logger : logging.Logger\n Use this logger instead of the default.\n max_num_errors : int\n Abort if this threshold is reached.\n num_documents : int\n Limit the number of documents to this number. Less than zero\n means retrieve them all.\n "
self.mn_host = host
self.setup_session(certificate, private_key)
self.setup_logging(id, verbosity, logger=logger)
self.mn_base_url = f'https://{host}:{port}{base_path}'
self.sys_meta_dict = {'submitter': 'TBD', 'rightsholder': 'TBD', 'authoritativeMN': 'TBD', 'originMN': 'TBD', 'formatId_custom': 'http://www.isotc211.org/2005/gmd'}
self._certificate = certificate
self._private_key = private_key
self.client_mgr = D1ClientManager(self.mn_base_url, certificate, private_key, self.logger)
self.job_records = []
self.failed_count = 0
self.updated_count = 0
self.created_count = 0
self.retry = retry
self.num_documents = num_documents
self.num_records_processed = 0
self.num_workers = num_workers
self.max_num_errors = max_num_errors
self.regex = (re.compile(regex) if (regex is not None) else None)
self.ignore_harvest_time = ignore_harvest_time
self.no_harvest = no_harvest
self.sitemap_url = sitemap_url
self._sitemaps = []
self._sitemap_records = []
requests.packages.urllib3.disable_warnings()
|
def __init__(self, host=None, port=None, certificate=None, base_path='/mn', private_key=None, verbosity='INFO', id='none', num_documents=(- 1), num_workers=1, max_num_errors=3, regex=None, retry=0, ignore_harvest_time=False, no_harvest=False, logger=None, sitemap_url=None):
"\n Parameters\n ----------\n base_path : str\n The default installation instructions for GMN imply that this\n should be '/mn' but the ARM install uses '/arm', so we have to\n make this optional.\n host, port : str, int\n This fully identifies the DataONE host and port number where we\n will be sending metadata records.\n certificate, private_key : str or path or None\n Paths to client side certificates. None if no verification is\n desired.\n logger : logging.Logger\n Use this logger instead of the default.\n max_num_errors : int\n Abort if this threshold is reached.\n num_documents : int\n Limit the number of documents to this number. Less than zero\n means retrieve them all.\n "
self.mn_host = host
self.setup_session(certificate, private_key)
self.setup_logging(id, verbosity, logger=logger)
self.mn_base_url = f'https://{host}:{port}{base_path}'
self.sys_meta_dict = {'submitter': 'TBD', 'rightsholder': 'TBD', 'authoritativeMN': 'TBD', 'originMN': 'TBD', 'formatId_custom': 'http://www.isotc211.org/2005/gmd'}
self._certificate = certificate
self._private_key = private_key
self.client_mgr = D1ClientManager(self.mn_base_url, certificate, private_key, self.logger)
self.job_records = []
self.failed_count = 0
self.updated_count = 0
self.created_count = 0
self.retry = retry
self.num_documents = num_documents
self.num_records_processed = 0
self.num_workers = num_workers
self.max_num_errors = max_num_errors
self.regex = (re.compile(regex) if (regex is not None) else None)
self.ignore_harvest_time = ignore_harvest_time
self.no_harvest = no_harvest
self.sitemap_url = sitemap_url
self._sitemaps = []
self._sitemap_records = []
requests.packages.urllib3.disable_warnings()<|docstring|>Parameters
----------
base_path : str
The default installation instructions for GMN imply that this
should be '/mn' but the ARM install uses '/arm', so we have to
make this optional.
host, port : str, int
This fully identifies the DataONE host and port number where we
will be sending metadata records.
certificate, private_key : str or path or None
Paths to client side certificates. None if no verification is
desired.
logger : logging.Logger
Use this logger instead of the default.
max_num_errors : int
Abort if this threshold is reached.
num_documents : int
Limit the number of documents to this number. Less than zero
means retrieve them all.<|endoftext|>
|
061c365e854d9e47ca3bb7a4f51a5d84f53ddf9c722cc27e90f43febad2ad75a
|
def get_sitemaps(self):
'\n Return list of sitemaps (plural, in case the sitemap is nested).\n '
return self._sitemaps
|
Return list of sitemaps (plural, in case the sitemap is nested).
|
schema_org/schema_org/core.py
|
get_sitemaps
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def get_sitemaps(self):
'\n \n '
return self._sitemaps
|
def get_sitemaps(self):
'\n \n '
return self._sitemaps<|docstring|>Return list of sitemaps (plural, in case the sitemap is nested).<|endoftext|>
|
d23f14644745bfc663e210fda0f5944ceb96895cf3b41756b977404303f1c049
|
def get_sitemaps_urlset(self):
'\n Return list of landing page URLs and last modified times of the landing\n pages.\n\n Filter the items if no lastmod time was listed. Replace the too-old\n time with None.\n '
return [((item[0], None) if (item[1] is _TOO_OLD_HARVEST_DATETIME) else item) for item in self._sitemap_records]
|
Return list of landing page URLs and last modified times of the landing
pages.
Filter the items if no lastmod time was listed. Replace the too-old
time with None.
|
schema_org/schema_org/core.py
|
get_sitemaps_urlset
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def get_sitemaps_urlset(self):
'\n Return list of landing page URLs and last modified times of the landing\n pages.\n\n Filter the items if no lastmod time was listed. Replace the too-old\n time with None.\n '
return [((item[0], None) if (item[1] is _TOO_OLD_HARVEST_DATETIME) else item) for item in self._sitemap_records]
|
def get_sitemaps_urlset(self):
'\n Return list of landing page URLs and last modified times of the landing\n pages.\n\n Filter the items if no lastmod time was listed. Replace the too-old\n time with None.\n '
return [((item[0], None) if (item[1] is _TOO_OLD_HARVEST_DATETIME) else item) for item in self._sitemap_records]<|docstring|>Return list of landing page URLs and last modified times of the landing
pages.
Filter the items if no lastmod time was listed. Replace the too-old
time with None.<|endoftext|>
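A self-contained illustration of the lastmod replacement performed in get_sitemaps_urlset above; the sentinel value and the example URLs are assumptions standing in for the real module-level _TOO_OLD_HARVEST_DATETIME defined elsewhere in core.py:

import datetime as dt

_TOO_OLD_HARVEST_DATETIME = dt.datetime(1900, 1, 1, tzinfo=dt.timezone.utc)  # assumed value
records = [
    ('https://example.org/a.xml', dt.datetime(2021, 5, 1, tzinfo=dt.timezone.utc)),
    ('https://example.org/b.xml', _TOO_OLD_HARVEST_DATETIME),
]
urlset = [(url, None) if lastmod is _TOO_OLD_HARVEST_DATETIME else (url, lastmod)
          for url, lastmod in records]
print(urlset)   # the second entry keeps its URL but its lastmod becomes None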
|
a4e2471dbe049e723bb8486dc0dc7bdec008fc39218a6145d79a0d5f10ed354b
|
def setup_session(self, certificate, private_key):
'\n Instantiate a requests session to help persist certain parameters\n across requests.\n\n See https://2.python-requests.org/en/master/user/advanced/ for further\n information.\n '
self.session = requests.Session()
if ((certificate is None) and (private_key is None)):
self.session.verify = False
else:
self.session.cert = (certificate, private_key)
self.session.headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}
|
Instantiate a requests session to help persist certain parameters
across requests.
See https://2.python-requests.org/en/master/user/advanced/ for further
information.
|
schema_org/schema_org/core.py
|
setup_session
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def setup_session(self, certificate, private_key):
'\n Instantiate a requests session to help persist certain parameters\n across requests.\n\n See https://2.python-requests.org/en/master/user/advanced/ for further\n information.\n '
self.session = requests.Session()
if ((certificate is None) and (private_key is None)):
self.session.verify = False
else:
self.session.cert = (certificate, private_key)
self.session.headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}
|
def setup_session(self, certificate, private_key):
'\n Instantiate a requests session to help persist certain parameters\n across requests.\n\n See https://2.python-requests.org/en/master/user/advanced/ for further\n information.\n '
self.session = requests.Session()
if ((certificate is None) and (private_key is None)):
self.session.verify = False
else:
self.session.cert = (certificate, private_key)
self.session.headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}<|docstring|>Instantiate a requests session to help persist certain parameters
across requests.
See https://2.python-requests.org/en/master/user/advanced/ for further
information.<|endoftext|>
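A minimal sketch of the same client-certificate-or-no-verification pattern using only the requests library (no network call is made); the make_session helper name and the User-Agent string are illustrative:

import requests

def make_session(certificate=None, private_key=None):
    session = requests.Session()
    if certificate is None and private_key is None:
        session.verify = False                      # no client cert: skip TLS verification
    else:
        session.cert = (certificate, private_key)   # client-side cert/key pair
    session.headers.update({'User-Agent': 'demo-harvester'})
    return session

print(make_session().verify)   # False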
|
a0510de7f0b4ca7d5fe7ebcbd41ce3e068937fc1a69f4cdb86b460b6ebc6596f
|
def setup_logging(self, logid, verbosity, logger=None):
'\n We will log both to STDOUT and to a file.\n\n Parameters\n ----------\n logid : str\n Use this to help name the physical log file.\n logger : logging.Logger\n Use this logger instead of the default.\n verbosity : str\n Level of logging verbosity.\n '
if (logger is not None):
self.logger = logger
return
level = getattr(logging, verbosity)
self.logger = logging.getLogger('datatone')
self.logger.setLevel(level)
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format)
formatter.default_msec_format = '%s.%03d'
formatter.converter = time.gmtime
fh = logging.FileHandler(f'{logid}.log', delay=True)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
stream = logging.StreamHandler(sys.stdout)
stream.setFormatter(formatter)
self.logger.addHandler(stream)
|
We will log both to STDOUT and to a file.
Parameters
----------
logid : str
Use this to help name the physical log file.
logger : logging.Logger
Use this logger instead of the default.
verbosity : str
Level of logging verbosity.
|
schema_org/schema_org/core.py
|
setup_logging
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def setup_logging(self, logid, verbosity, logger=None):
'\n We will log both to STDOUT and to a file.\n\n Parameters\n ----------\n logid : str\n Use this to help name the physical log file.\n logger : logging.Logger\n Use this logger instead of the default.\n verbosity : str\n Level of logging verbosity.\n '
if (logger is not None):
self.logger = logger
return
level = getattr(logging, verbosity)
self.logger = logging.getLogger('datatone')
self.logger.setLevel(level)
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format)
formatter.default_msec_format = '%s.%03d'
formatter.converter = time.gmtime
fh = logging.FileHandler(f'{logid}.log', delay=True)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
stream = logging.StreamHandler(sys.stdout)
stream.setFormatter(formatter)
self.logger.addHandler(stream)
|
def setup_logging(self, logid, verbosity, logger=None):
'\n We will log both to STDOUT and to a file.\n\n Parameters\n ----------\n logid : str\n Use this to help name the physical log file.\n logger : logging.Logger\n Use this logger instead of the default.\n verbosity : str\n Level of logging verbosity.\n '
if (logger is not None):
self.logger = logger
return
level = getattr(logging, verbosity)
self.logger = logging.getLogger('datatone')
self.logger.setLevel(level)
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format)
formatter.default_msec_format = '%s.%03d'
formatter.converter = time.gmtime
fh = logging.FileHandler(f'{logid}.log', delay=True)
fh.setFormatter(formatter)
self.logger.addHandler(fh)
stream = logging.StreamHandler(sys.stdout)
stream.setFormatter(formatter)
self.logger.addHandler(stream)<|docstring|>We will log both to STDOUT and to a file.
Parameters
----------
logid : str
Use this to help name the physical log file.
logger : logging.Logger
Use this logger instead of the default.
verbosity : str
Level of logging verbosity.<|endoftext|>
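A stand-alone sketch of the dual-handler (log file plus STDOUT) setup built in setup_logging above, with UTC timestamps; the logger name 'demo' and the file name 'demo.log' are illustrative:

import logging
import sys
import time

logger = logging.getLogger('demo')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter.converter = time.gmtime                   # log in UTC
for handler in (logging.FileHandler('demo.log', delay=True),
                logging.StreamHandler(sys.stdout)):
    handler.setFormatter(formatter)
    logger.addHandler(handler)
logger.info('hello from both handlers')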
|
d366ceba2f34152fe598d15573347cf41ef2a26565b3d59d0d7406cf5c38de5d
|
def generate_system_metadata(self, *, scimeta_bytes=None, native_identifier_sid=None, record_date=None, record_version=None):
"\n This function generates a system metadata document for describing\n the science metadata record being loaded. Some of the fields,\n such as checksum and size, are based off the bytes of the science\n metadata object itself. Other system metadata fields are passed\n to D1ClientManager in a dict which is configured in the main\n adapter program. Note that the checksum is assigned as an\n arbitrary version identifier to accommodate the source system's\n mutable content represented in the target system's immutable\n content standard.\n\n This is the default case. It should be specialized by each slender\n nodes object.\n\n Parameters\n ----------\n scimeta_bytes :\n Bytes of the node's original metadata document.\n native_identifier_sid :\n Node's system identifier for this object, which becomes the series\n ID, or sid.\n record_date :\n Date metadata document was created/modified in the source\n system. Becomes dateUploaded.\n record_version : str\n Will be the pid.\n\n Returns\n -------\n A dict containing node-specific system metadata properties that\n will apply to all science metadata documents loaded into GMN.\n "
sys_meta = v2.systemMetadata()
sys_meta.seriesId = native_identifier_sid
sys_meta.formatId = self.sys_meta_dict['formatId_custom']
sys_meta.size = len(scimeta_bytes)
digest = hashlib.md5(scimeta_bytes).hexdigest()
sys_meta.checksum = dataoneTypes.checksum(digest)
sys_meta.checksum.algorithm = 'MD5'
if (record_version is None):
sys_meta.identifier = sys_meta.checksum.value()
else:
sys_meta.identifier = record_version
sys_meta.dateUploaded = record_date
sys_meta.dateSysMetadataModified = dt.datetime.now(dt.timezone.utc)
sys_meta.rightsHolder = self.sys_meta_dict['rightsholder']
sys_meta.submitter = self.sys_meta_dict['submitter']
sys_meta.authoritativeMemberNode = self.sys_meta_dict['authoritativeMN']
sys_meta.originMemberNode = self.sys_meta_dict['originMN']
sys_meta.accessPolicy = self.generate_public_access_policy()
return sys_meta
|
This function generates a system metadata document for describing
the science metadata record being loaded. Some of the fields,
such as checksum and size, are based off the bytes of the science
metadata object itself. Other system metadata fields are passed
to D1ClientManager in a dict which is configured in the main
adapter program. Note that the checksum is assigned as an
arbitrary version identifier to accommodate the source system's
mutable content represented in the target system's immutable
content standard.
This is the default case. It should be specialized by each slender
nodes object.
Parameters
----------
scimeta_bytes :
Bytes of the node's original metadata document.
native_identifier_sid :
Node's system identifier for this object, which becomes the series
ID, or sid.
record_date :
Date metadata document was created/modified in the source
system. Becomes dateUploaded.
record_version : str
Will be the pid.
Returns
-------
A dict containing node-specific system metadata properties that
will apply to all science metadata documents loaded into GMN.
|
schema_org/schema_org/core.py
|
generate_system_metadata
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def generate_system_metadata(self, *, scimeta_bytes=None, native_identifier_sid=None, record_date=None, record_version=None):
"\n This function generates a system metadata document for describing\n the science metadata record being loaded. Some of the fields,\n such as checksum and size, are based off the bytes of the science\n metadata object itself. Other system metadata fields are passed\n to D1ClientManager in a dict which is configured in the main\n adapter program. Note that the checksum is assigned as an\n arbitrary version identifier to accommodate the source system's\n mutable content represented in the target system's immutable\n content standard.\n\n This is the default case. It should be specialized by each slender\n nodes object.\n\n Parameters\n ----------\n scimeta_bytes :\n Bytes of the node's original metadata document.\n native_identifier_sid :\n Node's system identifier for this object, which becomes the series\n ID, or sid.\n record_date :\n Date metadata document was created/modified in the source\n system. Becomes dateUploaded.\n record_version : str\n Will be the pid.\n\n Returns\n -------\n A dict containing node-specific system metadata properties that\n will apply to all science metadata documents loaded into GMN.\n "
sys_meta = v2.systemMetadata()
sys_meta.seriesId = native_identifier_sid
sys_meta.formatId = self.sys_meta_dict['formatId_custom']
sys_meta.size = len(scimeta_bytes)
digest = hashlib.md5(scimeta_bytes).hexdigest()
sys_meta.checksum = dataoneTypes.checksum(digest)
sys_meta.checksum.algorithm = 'MD5'
if (record_version is None):
sys_meta.identifier = sys_meta.checksum.value()
else:
sys_meta.identifier = record_version
sys_meta.dateUploaded = record_date
sys_meta.dateSysMetadataModified = dt.datetime.now(dt.timezone.utc)
sys_meta.rightsHolder = self.sys_meta_dict['rightsholder']
sys_meta.submitter = self.sys_meta_dict['submitter']
sys_meta.authoritativeMemberNode = self.sys_meta_dict['authoritativeMN']
sys_meta.originMemberNode = self.sys_meta_dict['originMN']
sys_meta.accessPolicy = self.generate_public_access_policy()
return sys_meta
|
def generate_system_metadata(self, *, scimeta_bytes=None, native_identifier_sid=None, record_date=None, record_version=None):
"\n This function generates a system metadata document for describing\n the science metadata record being loaded. Some of the fields,\n such as checksum and size, are based off the bytes of the science\n metadata object itself. Other system metadata fields are passed\n to D1ClientManager in a dict which is configured in the main\n adapter program. Note that the checksum is assigned as an\n arbitrary version identifier to accommodate the source system's\n mutable content represented in the target system's immutable\n content standard.\n\n This is the default case. It should be specialized by each slender\n nodes object.\n\n Parameters\n ----------\n scimeta_bytes :\n Bytes of the node's original metadata document.\n native_identifier_sid :\n Node's system identifier for this object, which becomes the series\n ID, or sid.\n record_date :\n Date metadata document was created/modified in the source\n system. Becomes dateUploaded.\n record_version : str\n Will be the pid.\n\n Returns\n -------\n A dict containing node-specific system metadata properties that\n will apply to all science metadata documents loaded into GMN.\n "
sys_meta = v2.systemMetadata()
sys_meta.seriesId = native_identifier_sid
sys_meta.formatId = self.sys_meta_dict['formatId_custom']
sys_meta.size = len(scimeta_bytes)
digest = hashlib.md5(scimeta_bytes).hexdigest()
sys_meta.checksum = dataoneTypes.checksum(digest)
sys_meta.checksum.algorithm = 'MD5'
if (record_version is None):
sys_meta.identifier = sys_meta.checksum.value()
else:
sys_meta.identifier = record_version
sys_meta.dateUploaded = record_date
sys_meta.dateSysMetadataModified = dt.datetime.now(dt.timezone.utc)
sys_meta.rightsHolder = self.sys_meta_dict['rightsholder']
sys_meta.submitter = self.sys_meta_dict['submitter']
sys_meta.authoritativeMemberNode = self.sys_meta_dict['authoritativeMN']
sys_meta.originMemberNode = self.sys_meta_dict['originMN']
sys_meta.accessPolicy = self.generate_public_access_policy()
return sys_meta<|docstring|>This function generates a system metadata document for describing
the science metadata record being loaded. Some of the fields,
such as checksum and size, are based off the bytes of the science
metadata object itself. Other system metadata fields are passed
to D1ClientManager in a dict which is configured in the main
adapter program. Note that the checksum is assigned as an
arbitrary version identifier to accommodate the source system's
mutable content represented in the target system's immutable
content standard.
This is the default case. It should be specialized by each slender
nodes object.
Parameters
----------
scimeta_bytes :
Bytes of the node's original metadata document.
native_identifier_sid :
Node's system identifier for this object, which becomes the series
ID, or sid.
record_date :
Date metadata document was created/modified in the source
system. Becomes dateUploaded.
record_version : str
Will be the pid.
Returns
-------
A dict containing node-specific system metadata properties that
will apply to all science metadata documents loaded into GMN.<|endoftext|>
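A pure-Python sketch of the identifier choice made in generate_system_metadata above, with the DataONE system-metadata types omitted: when the source supplies no version string, the MD5 digest of the document doubles as the PID. The pick_identifier helper name is an assumption for illustration:

import hashlib

def pick_identifier(scimeta_bytes, record_version=None):
    digest = hashlib.md5(scimeta_bytes).hexdigest()
    return record_version if record_version is not None else digest

print(pick_identifier(b'<metadata/>'))            # hex digest used as PID
print(pick_identifier(b'<metadata/>', 'v2.0'))    # 'v2.0'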
|
17e279fd159bd907d6ec2438ea7903477c512bed86a5693859cadc217b6d5012
|
def generate_public_access_policy(self):
'\n This function generates an access policy which is needed as\n part of system metadata for describing a science metadata object.\n In an adapter-based implementation, the ability to modify records\n is managed by the native repository, not GMN, and any changes\n in the native repository simple cascade down to GMN. This means\n it is unnecessary to set specific access policies for individual\n records. Therefore, a generic public read-only access policy\n is generated and assigned as part of system metadata to every\n record as it is loaded.\n '
accessPolicy = v2.AccessPolicy()
accessRule = v2.AccessRule()
accessRule.subject.append(d1_common.const.SUBJECT_PUBLIC)
permission = v2.Permission('write')
accessRule.permission.append(permission)
accessPolicy.append(accessRule)
return accessPolicy
|
This function generates an access policy which is needed as
part of system metadata for describing a science metadata object.
In an adapter-based implementation, the ability to modify records
is managed by the native repository, not GMN, and any changes
in the native repository simple cascade down to GMN. This means
it is unnecessary to set specific access policies for individual
records. Therefore, a generic public read-only access policy
is generated and assigned as part of system metadata to every
record as it is loaded.
|
schema_org/schema_org/core.py
|
generate_public_access_policy
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def generate_public_access_policy(self):
'\n This function generates an access policy which is needed as\n part of system metadata for describing a science metadata object.\n In an adapter-based implementation, the ability to modify records\n is managed by the native repository, not GMN, and any changes\n in the native repository simple cascade down to GMN. This means\n it is unnecessary to set specific access policies for individual\n records. Therefore, a generic public read-only access policy\n is generated and assigned as part of system metadata to every\n record as it is loaded.\n '
accessPolicy = v2.AccessPolicy()
accessRule = v2.AccessRule()
accessRule.subject.append(d1_common.const.SUBJECT_PUBLIC)
permission = v2.Permission('write')
accessRule.permission.append(permission)
accessPolicy.append(accessRule)
return accessPolicy
|
def generate_public_access_policy(self):
'\n This function generates an access policy which is needed as\n part of system metadata for describing a science metadata object.\n In an adapter-based implementation, the ability to modify records\n is managed by the native repository, not GMN, and any changes\n in the native repository simple cascade down to GMN. This means\n it is unnecessary to set specific access policies for individual\n records. Therefore, a generic public read-only access policy\n is generated and assigned as part of system metadata to every\n record as it is loaded.\n '
accessPolicy = v2.AccessPolicy()
accessRule = v2.AccessRule()
accessRule.subject.append(d1_common.const.SUBJECT_PUBLIC)
permission = v2.Permission('write')
accessRule.permission.append(permission)
accessPolicy.append(accessRule)
return accessPolicy<|docstring|>This function generates an access policy which is needed as
part of system metadata for describing a science metadata object.
In an adapter-based implementation, the ability to modify records
is managed by the native repository, not GMN, and any changes
in the native repository simple cascade down to GMN. This means
it is unnecessary to set specific access policies for individual
records. Therefore, a generic public read-only access policy
is generated and assigned as part of system metadata to every
record as it is loaded.<|endoftext|>
|
5e43d5b699d49534a70df021cd8928e08733fb503b23c8dc1968230934a284a4
|
def get_last_harvest_time(self):
'\n Get the last time that a harvest was run on this node.\n\n Returns\n -------\n datetime of last harvest\n '
if self.ignore_harvest_time:
return None
last_harvest_time_str = self.client_mgr.get_last_harvest_time()
last_harvest_time = dateutil.parser.parse(last_harvest_time_str)
self.logger.info(f'Last harvest time: {last_harvest_time}')
return last_harvest_time
|
Get the last time that a harvest was run on this node.
Returns
-------
datetime of last harvest
|
schema_org/schema_org/core.py
|
get_last_harvest_time
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def get_last_harvest_time(self):
'\n Get the last time that a harvest was run on this node.\n\n Returns\n -------\n datetime of last harvest\n '
if self.ignore_harvest_time:
return None
last_harvest_time_str = self.client_mgr.get_last_harvest_time()
last_harvest_time = dateutil.parser.parse(last_harvest_time_str)
self.logger.info(f'Last harvest time: {last_harvest_time}')
return last_harvest_time
|
def get_last_harvest_time(self):
'\n Get the last time that a harvest was run on this node.\n\n Returns\n -------\n datetime of last harvest\n '
if self.ignore_harvest_time:
return None
last_harvest_time_str = self.client_mgr.get_last_harvest_time()
last_harvest_time = dateutil.parser.parse(last_harvest_time_str)
self.logger.info(f'Last harvest time: {last_harvest_time}')
return last_harvest_time<|docstring|>Get the last time that a harvest was run on this node.
Returns
-------
datetime of last harvest<|endoftext|>
|
77d0a5a08beff37c7fade78bfd26d80836ca0fbac280b5db860dd674602b5277
|
def summarize(self):
'\n Produce a text summary for the logs about how the harvest went.\n '
if self.no_harvest:
return
msg = f'''GMN base URL: {self.mn_base_url}
certificate PEM path: {self._certificate}
certificate key path: {self._private_key}
'''
self.logger.info(msg)
self.logger.info('\n\n')
self.logger.info('Job Summary')
self.logger.info('===========')
self.logger.info(f'There were {self.created_count} new records.')
self.logger.info(f'There were {self.updated_count} updated records.')
self.summarize_job_records()
|
Produce a text summary for the logs about how the harvest went.
|
schema_org/schema_org/core.py
|
summarize
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def summarize(self):
'\n \n '
if self.no_harvest:
return
msg = f'''GMN base URL: {self.mn_base_url}
certificate PEM path: {self._certificate}
certificate key path: {self._private_key}
'''
self.logger.info(msg)
self.logger.info('\n\n')
self.logger.info('Job Summary')
self.logger.info('===========')
self.logger.info(f'There were {self.created_count} new records.')
self.logger.info(f'There were {self.updated_count} updated records.')
self.summarize_job_records()
|
def summarize(self):
'\n \n '
if self.no_harvest:
return
msg = f'''GMN base URL: {self.mn_base_url}
certificate PEM path: {self._certificate}
certificate key path: {self._private_key}
'''
self.logger.info(msg)
self.logger.info('\n\n')
self.logger.info('Job Summary')
self.logger.info('===========')
self.logger.info(f'There were {self.created_count} new records.')
self.logger.info(f'There were {self.updated_count} updated records.')
self.summarize_job_records()<|docstring|>Produce a text summary for the logs about how the harvest went.<|endoftext|>
|
ad5bde3b99aef2f970c925868c6b28839e13ac318642047589b4dfb979033d87
|
def summarize_job_records(self):
'\n Summarize the job record queue. Keep this factored out of the\n summarize routine for the purpose of testing.\n '
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
records = [(job.url, job.identifier, job.num_failures, job.result) for job in self.job_records]
df = pd.DataFrame.from_records(records, columns=columns)
msg = f'Successfully processed {df.Result.isnull().sum()} records.'
self.logger.info(msg)
df = df.dropna(subset=['Result'])
if (len(df) == 0):
return
fcn = (lambda x: repr(x).split('(')[0])
df = df.assign(error=(lambda df: df.Result.apply(fcn)))
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
df_error = df.drop(columns, axis='columns')
df_error['count'] = 1
summary = df_error.groupby('error').sum()
self.logger.info('\n\n')
msg = f'''Error summary:
{summary}
'''
self.logger.error(msg)
|
Summarize the job record queue. Keep this factored out of the
summarize routine for the purpose of testing.
|
schema_org/schema_org/core.py
|
summarize_job_records
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def summarize_job_records(self):
'\n Summarize the job record queue. Keep this factored out of the\n summarize routine for the purpose of testing.\n '
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
records = [(job.url, job.identifier, job.num_failures, job.result) for job in self.job_records]
df = pd.DataFrame.from_records(records, columns=columns)
msg = f'Successfully processed {df.Result.isnull().sum()} records.'
self.logger.info(msg)
df = df.dropna(subset=['Result'])
if (len(df) == 0):
return
fcn = (lambda x: repr(x).split('(')[0])
df = df.assign(error=(lambda df: df.Result.apply(fcn)))
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
df_error = df.drop(columns, axis='columns')
df_error['count'] = 1
summary = df_error.groupby('error').sum()
self.logger.info('\n\n')
msg = f'''Error summary:
{summary}
'''
self.logger.error(msg)
|
def summarize_job_records(self):
'\n Summarize the job record queue. Keep this factored out of the\n summarize routine for the purpose of testing.\n '
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
records = [(job.url, job.identifier, job.num_failures, job.result) for job in self.job_records]
df = pd.DataFrame.from_records(records, columns=columns)
msg = f'Successfully processed {df.Result.isnull().sum()} records.'
self.logger.info(msg)
df = df.dropna(subset=['Result'])
if (len(df) == 0):
return
fcn = (lambda x: repr(x).split('(')[0])
df = df.assign(error=(lambda df: df.Result.apply(fcn)))
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
df_error = df.drop(columns, axis='columns')
df_error['count'] = 1
summary = df_error.groupby('error').sum()
self.logger.info('\n\n')
msg = f'''Error summary:
{summary}
'''
self.logger.error(msg)<|docstring|>Summarize the job record queue. Keep this factored out of the
summarize routine for the purpose of testing.<|endoftext|>
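A self-contained pandas sketch of the error roll-up in summarize_job_records above, using toy job records in place of the real job queue:

import pandas as pd

records = [
    ('https://x/a', 'a', 0, None),
    ('https://x/b', 'b', 3, RuntimeError('boom')),
    ('https://x/c', 'c', 3, RuntimeError('boom')),
]
columns = ['URL', 'Identifier', 'NumFailures', 'Result']
df = pd.DataFrame.from_records(records, columns=columns)
print(f'Successfully processed {df.Result.isnull().sum()} records.')
df = df.dropna(subset=['Result'])
df = df.assign(error=df.Result.apply(lambda x: repr(x).split('(')[0]))
summary = df.drop(columns, axis='columns').assign(count=1).groupby('error').sum()
print(summary)   # one row per exception class with its count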
|
b23d3c6152d0c4fe62c5c7a2bd245140ab1e74f8f1d4706a6fc5a143fd477a9a
|
async def shutdown(self):
"\n Clean up tasks tied to the service's shutdown.\n "
msg = 'Shutting down...'
self.logger.info(msg)
tasks = [t for t in asyncio.all_tasks() if (t is not asyncio.current_task())]
msg = f'Cancelling {len(tasks)} outstanding tasks.'
self.logger.info(msg)
[task.cancel() for task in tasks]
(await asyncio.gather(*tasks, return_exceptions=True))
|
Clean up tasks tied to the service's shutdown.
|
schema_org/schema_org/core.py
|
shutdown
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
async def shutdown(self):
"\n \n "
msg = 'Shutting down...'
self.logger.info(msg)
tasks = [t for t in asyncio.all_tasks() if (t is not asyncio.current_task())]
msg = f'Cancelling {len(tasks)} outstanding tasks.'
self.logger.info(msg)
[task.cancel() for task in tasks]
(await asyncio.gather(*tasks, return_exceptions=True))
|
async def shutdown(self):
"\n \n "
msg = 'Shutting down...'
self.logger.info(msg)
tasks = [t for t in asyncio.all_tasks() if (t is not asyncio.current_task())]
msg = f'Cancelling {len(tasks)} outstanding tasks.'
self.logger.info(msg)
[task.cancel() for task in tasks]
(await asyncio.gather(*tasks, return_exceptions=True))<|docstring|>Clean up tasks tied to the service's shutdown.<|endoftext|>
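A runnable miniature of the same "cancel every other task, then gather" shutdown pattern; the worker/main coroutine names are illustrative:

import asyncio

async def worker(n):
    try:
        await asyncio.sleep(10)
    except asyncio.CancelledError:
        print(f'worker {n} cancelled')
        raise

async def main():
    workers = [asyncio.create_task(worker(n)) for n in range(3)]
    await asyncio.sleep(0)   # let the workers reach their first await
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    print(f'Cancelling {len(tasks)} outstanding tasks.')
    for task in tasks:
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

asyncio.run(main())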
|
b23d08e019d2ffff51a205d78a06a670a95fb6a7828ad9891b0dcd02b545dcf2
|
async def harvest_document(self, sid, pid, doc, record_date):
'\n Check if the member node has seen the document before and decide how to\n harvest it (or not) accordingly.\n\n Parameters\n ----------\n sid : str\n Handle used to identify objects uniquely. Also known as the\n series identifier.\n pid : str\n Record version.\n doc : bytes\n serialized version of XML metadata document\n record_date : datetime obj\n Last document modification time according to the site map.\n '
self.logger.debug(f'harvest_document: {sid}')
docbytes = lxml.etree.tostring(doc, pretty_print=True, encoding='utf-8', standalone=True)
kwargs = {'scimeta_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'record_version': pid}
sys_metadata = self.generate_system_metadata(**kwargs)
exists_dict = self.client_mgr.check_if_identifier_exists(sid)
if ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] > record_date)):
msg = f"Skipping {sid}, it already exists but GMN claims that its record date {exists_dict['record_date']} is later than the lastModified time claimed by the landing page {record_date}. The owners of the site should check their sitemap against their documents."
raise SkipError(msg)
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] < record_date)):
current_sid = exists_dict['current_version_id']
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'old_version_pid': exists_dict['current_version_id'], 'system_metadata': sys_metadata}
if self.client_mgr.update_science_metadata(**kwargs):
self.updated_count += 1
self.logger.info(f'Updated {sid}.')
else:
msg = f"Failed to UPDATE object with SID: {sid} / PID: {exists_dict['current_version_id']}"
self.logger.error(msg)
raise UnableToUpdateGmnRecord(f'Unable to update {sid}.')
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] == record_date)):
msg = f'Skipping {sid}, it already exists and has the same record date {record_date}'
raise SkipError(msg)
elif (exists_dict['outcome'] == 'failed'):
msg = f'The existence check for {sid} failed.'
self.logger.warning(msg)
elif (exists_dict['outcome'] == 'no'):
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'system_metadata': sys_metadata}
if self.client_mgr.load_science_metadata(**kwargs):
self.created_count += 1
msg = f'Created a new object identified as {sid}'
self.logger.info(msg)
else:
msg = f'Unable to create new object identified as {sid}.'
raise UnableToCreateNewGMNObject(msg)
|
Check if the member node has seen the document before and decide how to
harvest it (or not) accordingly.
Parameters
----------
sid : str
Handle used to identify objects uniquely. Also known as the
series identifier.
pid : str
Record version.
doc : bytes
serialized version of XML metadata document
record_date : datetime obj
Last document modification time according to the site map.
|
schema_org/schema_org/core.py
|
harvest_document
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
async def harvest_document(self, sid, pid, doc, record_date):
'\n Check if the member node has seen the document before and decide how to\n harvest it (or not) accordingly.\n\n Parameters\n ----------\n sid : str\n Handle used to identify objects uniquely. Also known as the\n series identifier.\n pid : str\n Record version.\n doc : bytes\n serialized version of XML metadata document\n record_date : datetime obj\n Last document modification time according to the site map.\n '
self.logger.debug(f'harvest_document: {sid}')
docbytes = lxml.etree.tostring(doc, pretty_print=True, encoding='utf-8', standalone=True)
kwargs = {'scimeta_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'record_version': pid}
sys_metadata = self.generate_system_metadata(**kwargs)
exists_dict = self.client_mgr.check_if_identifier_exists(sid)
if ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] > record_date)):
msg = f"Skipping {sid}, it already exists but GMN claims that its record date {exists_dict['record_date']} is later than the lastModified time claimed by the landing page {record_date}. The owners of the site should check their sitemap against their documents."
raise SkipError(msg)
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] < record_date)):
current_sid = exists_dict['current_version_id']
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'old_version_pid': exists_dict['current_version_id'], 'system_metadata': sys_metadata}
if self.client_mgr.update_science_metadata(**kwargs):
self.updated_count += 1
self.logger.info(f'Updated {sid}.')
else:
msg = f"Failed to UPDATE object with SID: {sid} / PID: {exists_dict['current_version_id']}"
self.logger.error(msg)
raise UnableToUpdateGmnRecord(f'Unable to update {sid}.')
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] == record_date)):
msg = f'Skipping {sid}, it already exists and has the same record date {record_date}'
raise SkipError(msg)
elif (exists_dict['outcome'] == 'failed'):
msg = f'The existence check for {sid} failed.'
self.logger.warning(msg)
elif (exists_dict['outcome'] == 'no'):
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'system_metadata': sys_metadata}
if self.client_mgr.load_science_metadata(**kwargs):
self.created_count += 1
msg = f'Created a new object identified as {sid}'
self.logger.info(msg)
else:
msg = f'Unable to create new object identified as {sid}.'
raise UnableToCreateNewGMNObject(msg)
|
async def harvest_document(self, sid, pid, doc, record_date):
'\n Check if the member node has seen the document before and decide how to\n harvest it (or not) accordingly.\n\n Parameters\n ----------\n sid : str\n Handle used to identify objects uniquely. Also known as the\n series identifier.\n pid : str\n Record version.\n doc : bytes\n serialized version of XML metadata document\n record_date : datetime obj\n Last document modification time according to the site map.\n '
self.logger.debug(f'harvest_document: {sid}')
docbytes = lxml.etree.tostring(doc, pretty_print=True, encoding='utf-8', standalone=True)
kwargs = {'scimeta_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'record_version': pid}
sys_metadata = self.generate_system_metadata(**kwargs)
exists_dict = self.client_mgr.check_if_identifier_exists(sid)
if ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] > record_date)):
msg = f"Skipping {sid}, it already exists but GMN claims that its record date {exists_dict['record_date']} is later than the lastModified time claimed by the landing page {record_date}. The owners of the site should check their sitemap against their documents."
raise SkipError(msg)
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] < record_date)):
current_sid = exists_dict['current_version_id']
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'old_version_pid': exists_dict['current_version_id'], 'system_metadata': sys_metadata}
if self.client_mgr.update_science_metadata(**kwargs):
self.updated_count += 1
self.logger.info(f'Updated {sid}.')
else:
msg = f"Failed to UPDATE object with SID: {sid} / PID: {exists_dict['current_version_id']}"
self.logger.error(msg)
raise UnableToUpdateGmnRecord(f'Unable to update {sid}.')
elif ((exists_dict['outcome'] == 'yes') and (exists_dict['record_date'] == record_date)):
msg = f'Skipping {sid}, it already exists and has the same record date {record_date}'
raise SkipError(msg)
elif (exists_dict['outcome'] == 'failed'):
msg = f'The existence check for {sid} failed.'
self.logger.warning(msg)
elif (exists_dict['outcome'] == 'no'):
kwargs = {'sci_metadata_bytes': docbytes, 'native_identifier_sid': sid, 'record_date': record_date, 'system_metadata': sys_metadata}
if self.client_mgr.load_science_metadata(**kwargs):
self.created_count += 1
msg = f'Created a new object identified as {sid}'
self.logger.info(msg)
else:
msg = f'Unable to create new object identified as {sid}.'
raise UnableToCreateNewGMNObject(msg)<|docstring|>Check if the member node has seen the document before and decide how to
harvest it (or not) accordingly.
Parameters
----------
sid : str
Handle used to identify objects uniquely. Also known as the
series identifier.
pid : str
Record version.
doc : bytes
serialized version of XML metadata document
record_date : datetime obj
Last document modification time according to the site map.<|endoftext|>
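A framework-free sketch of the branching in harvest_document above, reduced to the existence-check outcome and the two record dates; the decide helper name is an assumption, while the outcome strings match the record:

import datetime as dt

def decide(outcome, gmn_date, sitemap_date):
    if outcome == 'yes' and gmn_date > sitemap_date:
        return 'skip: GMN copy is newer than the sitemap lastmod'
    if outcome == 'yes' and gmn_date < sitemap_date:
        return 'update existing object'
    if outcome == 'yes':
        return 'skip: same record date'
    if outcome == 'failed':
        return 'warn: existence check failed'
    return 'create new object'

print(decide('no', None, dt.datetime(2020, 1, 1)))                             # create new object
print(decide('yes', dt.datetime(2020, 2, 1), dt.datetime(2020, 1, 1)))         # skip: GMN copy is newer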
|
0f3af649d90d25230502f5a1681e36521c7817e862669d528592ac9ea402579a
|
def is_sitemap_index_file(self, doc):
'\n Answer the question as to whether the document found at the other end\n of the sitemap URL is a sitemap index file - i.e. it references other\n sitemaps - or if it is a sitemap leaf.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n True or False, whether or not the document is a sitemap index file.\n '
elts = doc.xpath('sm:sitemap', namespaces=SITEMAP_NS)
if (len(elts) > 0):
return True
else:
return False
|
Answer the question as to whether the document found at the other end
of the sitemap URL is a sitemap index file - i.e. it references other
sitemaps - or if it is a sitemap leaf.
Parameters
----------
doc : ElementTree
the sitemap XML document loaded into an ElementTree object
Returns
-------
True or False, whether or not the document is a sitemap index file.
|
schema_org/schema_org/core.py
|
is_sitemap_index_file
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def is_sitemap_index_file(self, doc):
'\n Answer the question as to whether the document found at the other end\n of the sitemap URL is a sitemap index file - i.e. it references other\n sitemaps - or if it is a sitemap leaf.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n True or False, whether or not the document is a sitemap index file.\n '
elts = doc.xpath('sm:sitemap', namespaces=SITEMAP_NS)
if (len(elts) > 0):
return True
else:
return False
|
def is_sitemap_index_file(self, doc):
'\n Answer the question as to whether the document found at the other end\n of the sitemap URL is a sitemap index file - i.e. it references other\n sitemaps - or if it is a sitemap leaf.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n True or False, whether or not the document is a sitemap index file.\n '
elts = doc.xpath('sm:sitemap', namespaces=SITEMAP_NS)
if (len(elts) > 0):
return True
else:
return False<|docstring|>Answer the question as to whether the document found at the other end
of the sitemap URL is a sitemap index file - i.e. it references other
sitemaps - or if it is a sitemap leaf.
Parameters
----------
doc : ElementTree
the sitemap XML document loaded into an ElementTree object
Returns
-------
True or False, whether or not the document is a sitemap index file.<|endoftext|>
|
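As a rough standalone illustration of the index-vs-leaf test, the sketch below parses the sitemap itself; the namespace mapping uses the standard sitemaps.org URI as an assumption, since the repository's SITEMAP_NS constant is not reproduced in this record.
import io
import lxml.etree

# Assumed namespace prefix mapping; the actual SITEMAP_NS value is not shown above.
SITEMAP_NS = {'sm': 'http://www.sitemaps.org/schemas/sitemap/0.9'}

def looks_like_sitemap_index(xml_bytes):
    doc = lxml.etree.parse(io.BytesIO(xml_bytes))
    # Index files nest <sitemap> elements under the root; leaf sitemaps nest <url> elements.
    return len(doc.xpath('sm:sitemap', namespaces=SITEMAP_NS)) > 0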
116c78c3ea016847dc2c6c1937186c6224f51a34f7eb25d717a19a62c6c02829
|
def extract_records_from_sitemap(self, doc):
'\n Extract all the URLs and lastmod times from an XML sitemap.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n List of tuples, each consisting of a URL for a metadata document and\n its associated last modification time.\n '
urls = doc.xpath(self.SITEMAP_URL_PATH, namespaces=self.SITEMAP_NAMESPACE)
urls = [(f'{self.sitemap_url}/{url}' if (not url.startswith('http')) else url) for url in urls]
lastmods = doc.xpath(self.SITEMAP_LASTMOD_PATH, namespaces=self.SITEMAP_NAMESPACE)
if (len(lastmods) == 0):
lastmods = [_TOO_OLD_HARVEST_DATETIME for url in urls]
else:
lastmods = [dateutil.parser.parse(item) for item in lastmods]
UTC = dateutil.tz.gettz('UTC')
lastmods = [dateitem.replace(tzinfo=(dateitem.tzinfo or UTC)) for dateitem in lastmods]
records = [(url, lastmod) for (url, lastmod) in zip(urls, lastmods)]
msg = f'Extracted {len(records)} records from the sitemap document.'
self.logger.info(msg)
return records
|
Extract all the URLs and lastmod times from an XML sitemap.
Parameters
----------
doc : ElementTree
the sitemap XML document loaded into an ElementTree object
Returns
-------
List of tuples, each consisting of a URL for a metadata document and
its associated last modification time.
|
schema_org/schema_org/core.py
|
extract_records_from_sitemap
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def extract_records_from_sitemap(self, doc):
'\n Extract all the URLs and lastmod times from an XML sitemap.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n List of tuples, each consisting of a URL for a metadata document and\n its associated last modification time.\n '
urls = doc.xpath(self.SITEMAP_URL_PATH, namespaces=self.SITEMAP_NAMESPACE)
urls = [(f'{self.sitemap_url}/{url}' if (not url.startswith('http')) else url) for url in urls]
lastmods = doc.xpath(self.SITEMAP_LASTMOD_PATH, namespaces=self.SITEMAP_NAMESPACE)
if (len(lastmods) == 0):
lastmods = [_TOO_OLD_HARVEST_DATETIME for url in urls]
else:
lastmods = [dateutil.parser.parse(item) for item in lastmods]
UTC = dateutil.tz.gettz('UTC')
lastmods = [dateitem.replace(tzinfo=(dateitem.tzinfo or UTC)) for dateitem in lastmods]
records = [(url, lastmod) for (url, lastmod) in zip(urls, lastmods)]
msg = f'Extracted {len(records)} records from the sitemap document.'
self.logger.info(msg)
return records
|
def extract_records_from_sitemap(self, doc):
'\n Extract all the URLs and lastmod times from an XML sitemap.\n\n Parameters\n ----------\n doc : ElementTree\n the sitemap XML document loaded into an ElementTree object\n\n Returns\n -------\n List of tuples, each consisting of a URL for a metadata document and\n its associated last modification time.\n '
urls = doc.xpath(self.SITEMAP_URL_PATH, namespaces=self.SITEMAP_NAMESPACE)
urls = [(f'{self.sitemap_url}/{url}' if (not url.startswith('http')) else url) for url in urls]
lastmods = doc.xpath(self.SITEMAP_LASTMOD_PATH, namespaces=self.SITEMAP_NAMESPACE)
if (len(lastmods) == 0):
lastmods = [_TOO_OLD_HARVEST_DATETIME for url in urls]
else:
lastmods = [dateutil.parser.parse(item) for item in lastmods]
UTC = dateutil.tz.gettz('UTC')
lastmods = [dateitem.replace(tzinfo=(dateitem.tzinfo or UTC)) for dateitem in lastmods]
records = [(url, lastmod) for (url, lastmod) in zip(urls, lastmods)]
msg = f'Extracted {len(records)} records from the sitemap document.'
self.logger.info(msg)
return records<|docstring|>Extract all the URLs and lastmod times from an XML sitemap.
Parameters
----------
doc : ElementTree
the sitemap XML document loaded into an ElementTree object
Returns
-------
List of tuples, each consisting of a URL for a metadata document and
its associated last modification time.<|endoftext|>
|
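A condensed, standalone version of the same extraction is sketched below; the literal xpath strings stand in for the SITEMAP_URL_PATH and SITEMAP_LASTMOD_PATH class constants, whose exact values are not shown in this record, and the too-old fallback used when lastmod values are missing is omitted.
import io
import dateutil.parser
import dateutil.tz
import lxml.etree

NS = {'sm': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
UTC = dateutil.tz.gettz('UTC')

def sitemap_records(xml_bytes):
    doc = lxml.etree.parse(io.BytesIO(xml_bytes))
    urls = doc.xpath('sm:url/sm:loc/text()', namespaces=NS)
    lastmods = [dateutil.parser.parse(t) for t in
                doc.xpath('sm:url/sm:lastmod/text()', namespaces=NS)]
    # Naive timestamps are treated as UTC, mirroring the normalization above.
    lastmods = [d.replace(tzinfo=d.tzinfo or UTC) for d in lastmods]
    return list(zip(urls, lastmods))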
791b2de15f3290a94c8ce2578b10be31fe6615234f33fff9c3798eb8b7e625ef
|
def post_process_sitemap_records(self, records, last_harvest_time):
'\n Prune the sitemap records for various reasons. These might include:\n\n i)\n pruning any records that are older than the last harvest time\n IFF we are not directed to ignore the last harvest time\n ii)\n pruning records that do not match a regex IFF we are directed\n to use a regex\n iii)\n pruning records if we are limiting the number of documents that\n we are willing to process\n\n Parameters\n ----------\n records : list\n Each item in the list is composed of a URL and a last modification\n date.\n '
nrecs = len(records)
if (not self.ignore_harvest_time):
records = [(url, lastmod) for (url, lastmod) in records if (lastmod > last_harvest_time)]
nskipped = (nrecs - len(records))
msg = f'{nskipped} records skipped due to last harvest time {last_harvest_time} > lastmod times.'
self.logger.info(msg)
if (self.regex is not None):
nrecs = len(records)
records = [(url, lastmod) for (url, lastmod) in records if self.regex.search(url)]
num_skipped = (nrecs - len(records))
msg = f'{num_skipped} records skipped due to regex restriction.'
self.logger.info(msg)
self.num_records_processed += len(records)
if ((self.num_records_processed > self.num_documents) and (self.num_documents > (- 1))):
diff = (self.num_records_processed - self.num_documents)
records = records[:(- diff)]
msg = f'Looking to process {len(records)} records...'
self.logger.info(msg)
return records
|
Prune the sitemap records for various reasons. These might include:
i)
pruning any records that are older than the last harvest time
IFF we are not directed to ignore the last harvest time
ii)
pruning records that do not match a regex IFF we are directed
to use a regex
iii)
pruning records if we are limiting the number of documents that
we are willing to process
Parameters
----------
records : list
Each item in the list is composed of a URL and a last modification
date.
|
schema_org/schema_org/core.py
|
post_process_sitemap_records
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def post_process_sitemap_records(self, records, last_harvest_time):
'\n Prune the sitemap records for various reasons. These might include:\n\n i)\n pruning any records that are older than the last harvest time\n IFF we are not directed to ignore the last harvest time\n ii)\n pruning records that do not match a regex IFF we are directed\n to use a regex\n iii)\n pruning records if we are limiting the number of documents that\n we are willing to process\n\n Parameters\n ----------\n records : list\n Each item in the list is composed of a URL and a last modification\n date.\n '
nrecs = len(records)
if (not self.ignore_harvest_time):
records = [(url, lastmod) for (url, lastmod) in records if (lastmod > last_harvest_time)]
nskipped = (nrecs - len(records))
msg = f'{nskipped} records skipped due to last harvest time {last_harvest_time} > lastmod times.'
self.logger.info(msg)
if (self.regex is not None):
nrecs = len(records)
records = [(url, lastmod) for (url, lastmod) in records if self.regex.search(url)]
num_skipped = (nrecs - len(records))
msg = f'{num_skipped} records skipped due to regex restriction.'
self.logger.info(msg)
self.num_records_processed += len(records)
if ((self.num_records_processed > self.num_documents) and (self.num_documents > (- 1))):
diff = (self.num_records_processed - self.num_documents)
records = records[:(- diff)]
msg = f'Looking to process {len(records)} records...'
self.logger.info(msg)
return records
|
def post_process_sitemap_records(self, records, last_harvest_time):
'\n Prune the sitemap records for various reasons. These might include:\n\n i)\n pruning any records that are older than the last harvest time\n IFF we are not directed to ignore the last harvest time\n ii)\n pruning records that do not match a regex IFF we are directed\n to use a regex\n iii)\n pruning records if we are limiting the number of documents that\n we are willing to process\n\n Parameters\n ----------\n records : list\n Each item in the list is composed of a URL and a last modification\n date.\n '
nrecs = len(records)
if (not self.ignore_harvest_time):
records = [(url, lastmod) for (url, lastmod) in records if (lastmod > last_harvest_time)]
nskipped = (nrecs - len(records))
msg = f'{nskipped} records skipped due to last harvest time {last_harvest_time} > lastmod times.'
self.logger.info(msg)
if (self.regex is not None):
nrecs = len(records)
records = [(url, lastmod) for (url, lastmod) in records if self.regex.search(url)]
num_skipped = (nrecs - len(records))
msg = f'{num_skipped} records skipped due to regex restriction.'
self.logger.info(msg)
self.num_records_processed += len(records)
if ((self.num_records_processed > self.num_documents) and (self.num_documents > (- 1))):
diff = (self.num_records_processed - self.num_documents)
records = records[:(- diff)]
msg = f'Looking to process {len(records)} records...'
self.logger.info(msg)
return records<|docstring|>Prune the sitemap records for various reasons. These might include:
i)
pruning any records that are older than the last harvest time
IFF we are not directed to ignore the last harvest time
ii)
pruning records that do not match a regex IFF we are directed
to use a regex
iii)
pruning records if we are limiting the number of documents that
we are willing to process
Parameters
----------
records : list
Each item in the list is composed of a URL and a last modification
date.<|endoftext|>
|
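The same pruning can be demonstrated on plain (url, lastmod) tuples. The sketch below is simplified: it caps the list per call rather than against a running num_records_processed total as the method above does.
def prune(records, last_harvest_time, regex=None, max_docs=-1):
    # Drop anything not modified since the last harvest.
    records = [(url, mod) for (url, mod) in records if mod > last_harvest_time]
    # Optionally keep only URLs matching a compiled regex.
    if regex is not None:
        records = [(url, mod) for (url, mod) in records if regex.search(url)]
    # Optionally cap how many documents are processed (per call, unlike the method above).
    if max_docs > -1:
        records = records[:max_docs]
    return records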
a0584bd0aa9dbba14e1e120b3e37c30bc4f1dccfbfdd0ffed7ec5d26a9da1e7b
|
async def retrieve_url(self, url, headers=None):
'\n Return the contents pointed to by a URL.\n\n Parameters\n ----------\n url : str\n URL of either an HTML document or an XML metadata document\n headers : dict\n Optional headers to supply with the retrieval.\n\n Returns\n -------\n Binary contents of the body of the response object, response headers\n '
self.logger.debug(f'Retrieving URL {url}')
headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}
connector = aiohttp.TCPConnector(ssl=False)
async with aiohttp.ClientSession(headers=headers, connector=connector) as session:
async with session.get(url) as response:
response.raise_for_status()
return ((await response.read()), response.headers)
|
Return the contents pointed to by a URL.
Parameters
----------
url : str
URL of either an HTML document or an XML metadata document
headers : dict
Optional headers to supply with the retrieval.
Returns
-------
Binary contents of the body of the response object, response headers
|
schema_org/schema_org/core.py
|
retrieve_url
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
async def retrieve_url(self, url, headers=None):
'\n Return the contents pointed to by a URL.\n\n Parameters\n ----------\n url : str\n URL of either an HTML document or an XML metadata document\n headers : dict\n Optional headers to supply with the retrieval.\n\n Returns\n -------\n Binary contents of the body of the response object, response headers\n '
self.logger.debug(f'Retrieving URL {url}')
headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}
connector = aiohttp.TCPConnector(ssl=False)
async with aiohttp.ClientSession(headers=headers, connector=connector) as session:
async with session.get(url) as response:
response.raise_for_status()
return ((await response.read()), response.headers)
|
async def retrieve_url(self, url, headers=None):
'\n Return the contents pointed to by a URL.\n\n Parameters\n ----------\n url : str\n URL of either an HTML document or an XML metadata document\n headers : dict\n Optional headers to supply with the retrieval.\n\n Returns\n -------\n Binary contents of the body of the response object, response headers\n '
self.logger.debug(f'Retrieving URL {url}')
headers = {'User-Agent': 'DataONE adapter for schema.org harvest', 'From': '[email protected]'}
connector = aiohttp.TCPConnector(ssl=False)
async with aiohttp.ClientSession(headers=headers, connector=connector) as session:
async with session.get(url) as response:
response.raise_for_status()
return ((await response.read()), response.headers)<|docstring|>Return the contents pointed to by a URL.
Parameters
----------
url : str
URL of either an HTML document or an XML metadata document
headers : dict
Optional headers to supply with the retrieval.
Returns
-------
Binary contents of the body of the response object, response headers<|endoftext|>
|
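Outside the class, the same aiohttp fetch pattern can be driven with asyncio.run; the sitemap URL in the usage comment is only a placeholder.
import asyncio
import aiohttp

async def fetch(url):
    connector = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get(url) as response:
            response.raise_for_status()
            return await response.read(), response.headers

# content, headers = asyncio.run(fetch('https://www.example.com/sitemap.xml'))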
57fc447da3455c528d7d3d32c86be189a1a4f98d61429587e692cf169f99b3c9
|
def check_xml_headers(self, headers):
'\n Check the headers returned by the sitemap request response.\n\n Parameters\n ----------\n headers : dict\n HTTP response headers\n '
self.logger.debug('Checking XML headers...')
exp_headers = ['text/xml', 'text/xml; charset=utf-8', 'text/xml;charset=utf-8', 'application/x-gzip', 'application/xml']
if (headers['Content-Type'].lower() not in exp_headers):
msg = f'get_sitemap_document: headers are {headers}'
self.logger.debug(msg)
self.logger.warning(SITEMAP_NOT_XML_MESSAGE)
|
Check the headers returned by the sitemap request response.
Parameters
----------
headers : dict
HTTP response headers
|
schema_org/schema_org/core.py
|
check_xml_headers
|
DataONEorg/d1_ncei_adapter
| 1 |
python
|
def check_xml_headers(self, headers):
'\n Check the headers returned by the sitemap request response.\n\n Parameters\n ----------\n headers : dict\n HTTP response headers\n '
self.logger.debug('Checking XML headers...')
exp_headers = ['text/xml', 'text/xml; charset=utf-8', 'text/xml;charset=utf-8', 'application/x-gzip', 'application/xml']
if (headers['Content-Type'].lower() not in exp_headers):
msg = f'get_sitemap_document: headers are {headers}'
self.logger.debug(msg)
self.logger.warning(SITEMAP_NOT_XML_MESSAGE)
|
def check_xml_headers(self, headers):
'\n Check the headers returned by the sitemap request response.\n\n Parameters\n ----------\n headers : dict\n HTTP response headers\n '
self.logger.debug('Checking XML headers...')
exp_headers = ['text/xml', 'text/xml; charset=utf-8', 'text/xml;charset=utf-8', 'application/x-gzip', 'application/xml']
if (headers['Content-Type'].lower() not in exp_headers):
msg = f'get_sitemap_document: headers are {headers}'
self.logger.debug(msg)
self.logger.warning(SITEMAP_NOT_XML_MESSAGE)<|docstring|>Check the headers returned by the sitemap request response.
Parameters
----------
headers : dict
HTTP response headers<|endoftext|>
|
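The same Content-Type screen can be expressed as a small standalone predicate; the accepted values simply mirror the list in the method above.
XML_CONTENT_TYPES = {
    'text/xml',
    'text/xml; charset=utf-8',
    'text/xml;charset=utf-8',
    'application/x-gzip',
    'application/xml',
}

def looks_like_xml(headers):
    # Case-insensitive match against the content types the harvester accepts.
    return headers.get('Content-Type', '').lower() in XML_CONTENT_TYPES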