Dataset columns: body_hash (string, 64 chars), body (string, 23 to 109k chars), docstring (string, 1 to 57k chars), path (string, 4 to 198 chars), name (string, 1 to 115 chars), repository_name (string, 7 to 111 chars), repository_stars (float64, 0 to 191k), lang (string, 1 class), body_without_docstring (string, 14 to 108k chars), unified (string, 45 to 133k chars).

body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
0ab9dc29f4edf199279d1c3d4e2b4b62700990f0fa8d90e0e3fde6e2ca115e9b | def t(*shape: Dimension) -> TensorType:
'\n Creates an object with the given shape, for testing.\n '
return TestShaped(shape) | Creates an object with the given shape, for testing. | tests/gpflow/experimental/check_shapes/utils.py | t | joelberkeley/GPflow | 0 | python | def t(*shape: Dimension) -> TensorType:
'\n \n '
return TestShaped(shape) | def t(*shape: Dimension) -> TensorType:
'\n \n '
return TestShaped(shape)<|docstring|>Creates an object with the given shape, for testing.<|endoftext|> |
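The `t` helper above just wraps its arguments in a `TestShaped` object so the `check_shapes` tests have something with a known shape. `TestShaped` itself is not part of this record; a minimal stand-in, assuming all the tests need is a `shape` attribute, might look like this:

```python
from typing import Tuple


class TestShapedStandIn:
    """Minimal stand-in for TestShaped (assumed to only expose the given shape)."""

    def __init__(self, shape: Tuple[int, ...]) -> None:
        self.shape = shape


def t_standin(*shape: int) -> TestShapedStandIn:
    """Creates an object with the given shape, for testing."""
    return TestShapedStandIn(shape)


obj = t_standin(3, 4)
print(obj.shape)  # (3, 4)
```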
5416f81381da8b1d07b3144b753f7ed095235a5933a48a44c9e8a761e77f776d | def update_availability_zone(ec2, availability_zone: str, volumes: List[AbstractInstanceVolume]):
"Checks that existing volumes located in the same AZ and the AZ from the\n config file matches volumes AZ.\n\n Args:\n ec2: EC2 boto3 client\n availability_zone: Availability Zone from the configuration.\n volumes: List of volume objects.\n\n Returns:\n The final AZ where the instance should be run or an empty string if\n the instance can be run in any AZ.\n\n Raises:\n ValueError: AZ in the config file doesn't match the AZs of the volumes or\n AZs of the volumes are different.\n "
availability_zone = availability_zone
for volume in volumes:
if isinstance(volume, EbsVolume):
ec2_volume = Volume.get_by_name(ec2, volume.ec2_volume_name)
if ec2_volume:
if (availability_zone and (availability_zone != ec2_volume.availability_zone)):
raise ValueError("The availability zone in the configuration file doesn't match the availability zone of the existing volume or you have two existing volumes in different availability zones.")
availability_zone = ec2_volume.availability_zone
return availability_zone | Checks that existing volumes located in the same AZ and the AZ from the
config file matches volumes AZ.
Args:
ec2: EC2 boto3 client
availability_zone: Availability Zone from the configuration.
volumes: List of volume objects.
Returns:
The final AZ where the instance should be run or an empty string if
the instance can be run in any AZ.
Raises:
ValueError: AZ in the config file doesn't match the AZs of the volumes or
AZs of the volumes are different. | spotty/providers/aws/helpers/availability_zone.py | update_availability_zone | wilmeragsgh/spotty | 246 | python | def update_availability_zone(ec2, availability_zone: str, volumes: List[AbstractInstanceVolume]):
"Checks that existing volumes located in the same AZ and the AZ from the\n config file matches volumes AZ.\n\n Args:\n ec2: EC2 boto3 client\n availability_zone: Availability Zone from the configuration.\n volumes: List of volume objects.\n\n Returns:\n The final AZ where the instance should be run or an empty string if\n the instance can be run in any AZ.\n\n Raises:\n ValueError: AZ in the config file doesn't match the AZs of the volumes or\n AZs of the volumes are different.\n "
availability_zone = availability_zone
for volume in volumes:
if isinstance(volume, EbsVolume):
ec2_volume = Volume.get_by_name(ec2, volume.ec2_volume_name)
if ec2_volume:
if (availability_zone and (availability_zone != ec2_volume.availability_zone)):
raise ValueError("The availability zone in the configuration file doesn't match the availability zone of the existing volume or you have two existing volumes in different availability zones.")
availability_zone = ec2_volume.availability_zone
return availability_zone | def update_availability_zone(ec2, availability_zone: str, volumes: List[AbstractInstanceVolume]):
"Checks that existing volumes located in the same AZ and the AZ from the\n config file matches volumes AZ.\n\n Args:\n ec2: EC2 boto3 client\n availability_zone: Availability Zone from the configuration.\n volumes: List of volume objects.\n\n Returns:\n The final AZ where the instance should be run or an empty string if\n the instance can be run in any AZ.\n\n Raises:\n ValueError: AZ in the config file doesn't match the AZs of the volumes or\n AZs of the volumes are different.\n "
availability_zone = availability_zone
for volume in volumes:
if isinstance(volume, EbsVolume):
ec2_volume = Volume.get_by_name(ec2, volume.ec2_volume_name)
if ec2_volume:
if (availability_zone and (availability_zone != ec2_volume.availability_zone)):
raise ValueError("The availability zone in the configuration file doesn't match the availability zone of the existing volume or you have two existing volumes in different availability zones.")
availability_zone = ec2_volume.availability_zone
return availability_zone<|docstring|>Checks that existing volumes located in the same AZ and the AZ from the
config file matches volumes AZ.
Args:
ec2: EC2 boto3 client
availability_zone: Availability Zone from the configuration.
volumes: List of volume objects.
Returns:
The final AZ where the instance should be run or an empty string if
the instance can be run in any AZ.
Raises:
ValueError: AZ in the config file doesn't match the AZs of the volumes or
AZs of the volumes are different.<|endoftext|> |
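The helper above relies on spotty's `Volume`/`EbsVolume` wrappers to look up an existing EBS volume. For reference, a raw boto3 sketch of that lookup is shown below; it assumes the volume is identified by a `Name` tag and that AWS credentials and a region are configured, so treat it as an illustration rather than part of spotty.

```python
import boto3


def get_volume_az(volume_name: str, region_name: str = "eu-central-1"):
    """Return the Availability Zone of an EBS volume found by its Name tag, or None."""
    ec2 = boto3.client("ec2", region_name=region_name)
    response = ec2.describe_volumes(
        Filters=[{"Name": "tag:Name", "Values": [volume_name]}]
    )
    volumes = response.get("Volumes", [])
    if not volumes:
        return None  # no existing volume with that name: any AZ will do
    return volumes[0]["AvailabilityZone"]
```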
13053b9ea0a6aa56558a8f00163c64a216b025f5db7a0266ac25136350a1ce98 | def fit(self, X, Y2, learning_rate=0.01, max_epochs=1000):
'\n Takes in arguments X and Y. The learning rate is defined by the user as well as the max_epochs.\n\n learning_rate = 1, 0.1, 0.01, 0.001, 0.0001, ens. \n\n max_epochs is the number of iterations, the higher == higher accuracy\n '
(N, D) = X.shape
self.w = np.random.randn(D)
self.b = 0
costs = []
for epoch in range(max_epochs):
Y_hat = self.predict(X)
P = np.argmax(Y_hat, axis=1)
incorrect = np.nonzero((Y2 != P))[0]
if (len(incorrect) == 0):
break
i = np.random.choice(incorrect)
self.w += ((learning_rate * X[i]) * Y2[i].T)
self.b += (learning_rate * Y2[i])
c = (len(incorrect) / float(N))
costs.append(c)
print(('Final w: %s Final b: %s Number of epochs: %s / %s' % (self.w, self.b, (epoch + 1), max_epochs)))
plt.plot(costs, label='costs')
plt.legend()
plt.show() | Takes in arguments X and Y. The learning rate is defined by the user as well as the max_epochs.
learning_rate = 1, 0.1, 0.01, 0.001, 0.0001, ens.
max_epochs is the number of iterations, the higher == higher accuracy | Supervised Machine Learning/Perceptron/Perceptron_multi.py | fit | marcelkotze007/mk007---ML-Python-library | 0 | python | def fit(self, X, Y2, learning_rate=0.01, max_epochs=1000):
'\n Takes in arguments X and Y. The learning rate is defined by the user as well as the max_epochs.\n\n learning_rate = 1, 0.1, 0.01, 0.001, 0.0001, ens. \n\n max_epochs is the number of iterations, the higher == higher accuracy\n '
(N, D) = X.shape
self.w = np.random.randn(D)
self.b = 0
costs = []
for epoch in range(max_epochs):
Y_hat = self.predict(X)
P = np.argmax(Y_hat, axis=1)
incorrect = np.nonzero((Y2 != P))[0]
if (len(incorrect) == 0):
break
i = np.random.choice(incorrect)
self.w += ((learning_rate * X[i]) * Y2[i].T)
self.b += (learning_rate * Y2[i])
c = (len(incorrect) / float(N))
costs.append(c)
print(('Final w: %s Final b: %s Number of epochs: %s / %s' % (self.w, self.b, (epoch + 1), max_epochs)))
plt.plot(costs, label='costs')
plt.legend()
plt.show() | def fit(self, X, Y2, learning_rate=0.01, max_epochs=1000):
'\n Takes in arguments X and Y. The learning rate is defined by the user as well as the max_epochs.\n\n learning_rate = 1, 0.1, 0.01, 0.001, 0.0001, ens. \n\n max_epochs is the number of iterations, the higher == higher accuracy\n '
(N, D) = X.shape
self.w = np.random.randn(D)
self.b = 0
costs = []
for epoch in range(max_epochs):
Y_hat = self.predict(X)
P = np.argmax(Y_hat, axis=1)
incorrect = np.nonzero((Y2 != P))[0]
if (len(incorrect) == 0):
break
i = np.random.choice(incorrect)
self.w += ((learning_rate * X[i]) * Y2[i].T)
self.b += (learning_rate * Y2[i])
c = (len(incorrect) / float(N))
costs.append(c)
print(('Final w: %s Final b: %s Number of epochs: %s / %s' % (self.w, self.b, (epoch + 1), max_epochs)))
plt.plot(costs, label='costs')
plt.legend()
plt.show()<|docstring|>Takes in arguments X and Y. The learning rate is defined by the user as well as the max_epochs.
learning_rate = 1, 0.1, 0.01, 0.001, 0.0001, ens.
max_epochs is the number of iterations, the higher == higher accuracy<|endoftext|> |
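The `fit` method above is the classic perceptron update: pick one misclassified sample and move `w` and `b` towards it, tracking the error rate per epoch. A self-contained sketch of that update rule on a toy, linearly separable binary problem (labels in {-1, +1}) is shown below; it illustrates the algorithm only and is not the multi-class class from this record.

```python
import numpy as np

rng = np.random.default_rng(0)

# Toy linearly separable data: two Gaussian blobs, labels -1 and +1.
X = np.vstack([rng.normal(-2, 1, size=(50, 2)), rng.normal(2, 1, size=(50, 2))])
y = np.array([-1] * 50 + [1] * 50)

w = np.zeros(2)
b = 0.0
learning_rate = 0.01

for epoch in range(1000):
    predictions = np.sign(X @ w + b)
    incorrect = np.nonzero(predictions != y)[0]
    if len(incorrect) == 0:
        break  # every sample classified correctly
    i = rng.choice(incorrect)          # pick one misclassified sample
    w += learning_rate * y[i] * X[i]   # perceptron update
    b += learning_rate * y[i]

print("epochs used:", epoch + 1, "final error rate:", len(incorrect) / len(y))
```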
9089694ace03d99caf78b056c4d27c6dd24dbeee1953a32902e6d2955fe5d05c | @staticmethod
def random_users():
'\n based on the number of the active users in each day, randomly select user ids from user table\n :return: list of the active user ids\n '
ids_lst = User.load_all_ids_from_db()
selected_users = list()
users_In_Day = random.randint(10, (len(ids_lst[0]) - 1))
for i in range(users_In_Day):
idx = random.randint(0, (len(ids_lst[0]) - 1))
selected_users.append(ids_lst[0][idx][0])
return selected_users | based on the number of the active users in each day, randomly select user ids from user table
:return: list of the active user ids | chainedSCT/extraction/location_Extraction.py | random_users | MSBeni/SmartContactTracing_Chained | 1 | python | @staticmethod
def random_users():
'\n based on the number of the active users in each day, randomly select user ids from user table\n :return: list of the active user ids\n '
ids_lst = User.load_all_ids_from_db()
selected_users = list()
users_In_Day = random.randint(10, (len(ids_lst[0]) - 1))
for i in range(users_In_Day):
idx = random.randint(0, (len(ids_lst[0]) - 1))
selected_users.append(ids_lst[0][idx][0])
return selected_users | @staticmethod
def random_users():
'\n based on the number of the active users in each day, randomly select user ids from user table\n :return: list of the active user ids\n '
ids_lst = User.load_all_ids_from_db()
selected_users = list()
users_In_Day = random.randint(10, (len(ids_lst[0]) - 1))
for i in range(users_In_Day):
idx = random.randint(0, (len(ids_lst[0]) - 1))
selected_users.append(ids_lst[0][idx][0])
return selected_users<|docstring|>based on the number of the active users in each day, randomly select user ids from user table
:return: list of the active user ids<|endoftext|> |
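One detail of `random_users` above: indices are drawn with `random.randint`, so the same user id can be selected more than once for a day. If distinct ids are wanted instead, `random.sample` does that directly; a minimal sketch with plain integers standing in for the ids loaded from the database:

```python
import random

all_user_ids = list(range(1, 101))  # stand-in for the ids loaded from the user table

users_in_day = random.randint(10, len(all_user_ids) - 1)
selected_users = random.sample(all_user_ids, users_in_day)  # no repeated ids

print(len(selected_users), "distinct active users selected")
```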
586531daff81a97968287eb9a6a646eccb55b4aacf2f792db2921a37e6e652af | @classmethod
def random_user_location(cls, step_size=0.5):
"\n create locations based on defined step size and number of steps in the environment for each user\n :return: list of the positions' tuples\n "
positions = list()
x_pos = random.uniform(0.0, 20.0)
y_pos = random.uniform(0.0, 10.0)
positions.append((x_pos, y_pos))
num_steps = random.randint(20, 50)
while (num_steps > 0):
new_x_pos = (x_pos + (random.choice([(- 1), 1]) * step_size))
new_y_pos = (y_pos + (random.choice([(- 1), 1]) * step_size))
if ((0.0 <= new_x_pos <= 20.0) and (0.0 <= new_y_pos <= 10.0)):
positions.append((new_x_pos, new_y_pos))
num_steps -= 1
return positions | create locations based on defined step size and number of steps in the environment for each user
:return: list of the positions' tuples | chainedSCT/extraction/location_Extraction.py | random_user_location | MSBeni/SmartContactTracing_Chained | 1 | python | @classmethod
def random_user_location(cls, step_size=0.5):
"\n create locations based on defined step size and number of steps in the environment for each user\n :return: list of the positions' tuples\n "
positions = list()
x_pos = random.uniform(0.0, 20.0)
y_pos = random.uniform(0.0, 10.0)
positions.append((x_pos, y_pos))
num_steps = random.randint(20, 50)
while (num_steps > 0):
new_x_pos = (x_pos + (random.choice([(- 1), 1]) * step_size))
new_y_pos = (y_pos + (random.choice([(- 1), 1]) * step_size))
if ((0.0 <= new_x_pos <= 20.0) and (0.0 <= new_y_pos <= 10.0)):
positions.append((new_x_pos, new_y_pos))
num_steps -= 1
return positions | @classmethod
def random_user_location(cls, step_size=0.5):
"\n create locations based on defined step size and number of steps in the environment for each user\n :return: list of the positions' tuples\n "
positions = list()
x_pos = random.uniform(0.0, 20.0)
y_pos = random.uniform(0.0, 10.0)
positions.append((x_pos, y_pos))
num_steps = random.randint(20, 50)
while (num_steps > 0):
new_x_pos = (x_pos + (random.choice([(- 1), 1]) * step_size))
new_y_pos = (y_pos + (random.choice([(- 1), 1]) * step_size))
if ((0.0 <= new_x_pos <= 20.0) and (0.0 <= new_y_pos <= 10.0)):
positions.append((new_x_pos, new_y_pos))
num_steps -= 1
return positions<|docstring|>create locations based on defined step size and number of steps in the environment for each user
:return: list of the positions' tuples<|endoftext|> |
f21e53f4b1d8dc44c94cf84e7cf521f2436944c804fae40e364649f0974af0d5 | @classmethod
def save_location_to_db(cls, argument_handler):
"\n create data specifically for all the active users and save timestamped data containing the user's locations to\n database\n :param argument_handler: imported arguments\n :return:\n "
selected_users = cls.random_users()
Location.create_locations_table()
for user in selected_users:
for j in range(argument_handler.numDays):
date_local = (datetime.today() - timedelta(days=j)).date()
xy_locations = cls.random_user_location()
for i in range(len(xy_locations)):
time_local = (datetime.now() - timedelta(seconds=5)).time()
location_ = Location(user, date_local, time_local, xy_locations[i][0], xy_locations[i][1])
location_.save_loc_to_db() | create data specifically for all the active users and save timestamped data containing the user's locations to
database
:param argument_handler: imported arguments
:return: | chainedSCT/extraction/location_Extraction.py | save_location_to_db | MSBeni/SmartContactTracing_Chained | 1 | python | @classmethod
def save_location_to_db(cls, argument_handler):
"\n create data specifically for all the active users and save timestamped data containing the user's locations to\n database\n :param argument_handler: imported arguments\n :return:\n "
selected_users = cls.random_users()
Location.create_locations_table()
for user in selected_users:
for j in range(argument_handler.numDays):
date_local = (datetime.today() - timedelta(days=j)).date()
xy_locations = cls.random_user_location()
for i in range(len(xy_locations)):
time_local = (datetime.now() - timedelta(seconds=5)).time()
location_ = Location(user, date_local, time_local, xy_locations[i][0], xy_locations[i][1])
location_.save_loc_to_db() | @classmethod
def save_location_to_db(cls, argument_handler):
"\n create data specifically for all the active users and save timestamped data containing the user's locations to\n database\n :param argument_handler: imported arguments\n :return:\n "
selected_users = cls.random_users()
Location.create_locations_table()
for user in selected_users:
for j in range(argument_handler.numDays):
date_local = (datetime.today() - timedelta(days=j)).date()
xy_locations = cls.random_user_location()
for i in range(len(xy_locations)):
time_local = (datetime.now() - timedelta(seconds=5)).time()
location_ = Location(user, date_local, time_local, xy_locations[i][0], xy_locations[i][1])
location_.save_loc_to_db()<|docstring|>create data specifically for all the active users and save timestamped data containing the user's locations to
database
:param argument_handler: imported arguments
:return:<|endoftext|> |
c44df322760ad13cc652265c8faa1f6a246a6997126933b38cbc660769b28a1b | def fixCollate(x, pass_pid=False):
'\n 1. make sure only one shape and annotation type in the batch.\n 2. add a meta of the batch.\n '
hashstat = defaultdict(list)
metas = defaultdict(list)
for i in x:
bf = i['meta'].batchflag
metas[bf].append(i.pop('meta'))
hashstat[bf].append(i)
(bf, x) = max(hashstat.items(), key=(lambda t: len(t[1])))
metas = metas[bf]
x = deep_collate(x, True, ['meta'])
x.setdefault('Yb', None)
x.setdefault('mask', None)
x['meta'] = {'batchflag': bf, 'balanced': True, 'augindices': tuple((i.augmented for i in metas))}
if pass_pid:
x['meta'] = metas
return x | 1. make sure only one shape and annotation type in the batch.
2. add a meta of the batch. | src/data/dataloader.py | fixCollate | JamzumSum/yNet | 5 | python | def fixCollate(x, pass_pid=False):
'\n 1. make sure only one shape and annotation type in the batch.\n 2. add a meta of the batch.\n '
hashstat = defaultdict(list)
metas = defaultdict(list)
for i in x:
bf = i['meta'].batchflag
metas[bf].append(i.pop('meta'))
hashstat[bf].append(i)
(bf, x) = max(hashstat.items(), key=(lambda t: len(t[1])))
metas = metas[bf]
x = deep_collate(x, True, ['meta'])
x.setdefault('Yb', None)
x.setdefault('mask', None)
x['meta'] = {'batchflag': bf, 'balanced': True, 'augindices': tuple((i.augmented for i in metas))}
if pass_pid:
x['meta'] = metas
return x | def fixCollate(x, pass_pid=False):
'\n 1. make sure only one shape and annotation type in the batch.\n 2. add a meta of the batch.\n '
hashstat = defaultdict(list)
metas = defaultdict(list)
for i in x:
bf = i['meta'].batchflag
metas[bf].append(i.pop('meta'))
hashstat[bf].append(i)
(bf, x) = max(hashstat.items(), key=(lambda t: len(t[1])))
metas = metas[bf]
x = deep_collate(x, True, ['meta'])
x.setdefault('Yb', None)
x.setdefault('mask', None)
x['meta'] = {'batchflag': bf, 'balanced': True, 'augindices': tuple((i.augmented for i in metas))}
if pass_pid:
x['meta'] = metas
return x<|docstring|>1. make sure only one shape and annotation type in the batch.
2. add a meta of the batch.<|endoftext|> |
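`fixCollate` above is a custom collate function: it groups the sampled items by `meta.batchflag`, keeps only the largest group (so a batch never mixes shapes or annotation types), collates it, and records which items were augmented. A minimal, self-contained sketch of just the majority-group selection is given below, with plain dictionaries standing in for dataset items and without the `deep_collate` step; in practice such a function is passed to `torch.utils.data.DataLoader` through its `collate_fn` argument.

```python
from collections import defaultdict


def majority_flag_collate(items):
    """Keep only the items whose batchflag is the most common one in the sample."""
    groups = defaultdict(list)
    for item in items:
        groups[item["batchflag"]].append(item)
    # Pick the flag with the most items; ties are resolved arbitrarily.
    flag, kept = max(groups.items(), key=lambda kv: len(kv[1]))
    return {"batchflag": flag, "items": kept}


batch = majority_flag_collate(
    [{"batchflag": "2d"}, {"batchflag": "2d"}, {"batchflag": "3d"}]
)
print(batch["batchflag"], len(batch["items"]))  # 2d 2
```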
ba3ef79d2175bb59326176f7136286e40cf2cc4fa22787a1967db228ea1d403b | def load_img(self, img):
'Loads image of piece'
self.img = img | Loads image of piece | src/pieces/piece.py | load_img | pranavmodx/ChessX | 3 | python | def load_img(self, img):
self.img = img | def load_img(self, img):
self.img = img<|docstring|>Loads image of piece<|endoftext|> |
a53ba955a9df9e51dc5ec1097125affb021cd44d8c1369991531361e65c478f2 | def size(self):
'Returns size (or default size) of piece image'
try:
sz = self.img.get_height()
return sz
except:
return 75 | Returns size (or default size) of piece image | src/pieces/piece.py | size | pranavmodx/ChessX | 3 | python | def size(self):
try:
sz = self.img.get_height()
return sz
except:
return 75 | def size(self):
try:
sz = self.img.get_height()
return sz
except:
return 75<|docstring|>Returns size (or default size) of piece image<|endoftext|> |
c4225280b6a3f0aa1bd8b338004544f1e60ba38490be436c9d0d307c0937ec30 | def display(self, screen):
'Displays image on screen'
screen_obj = screen.blit(self.img, self.pos) | Displays image on screen | src/pieces/piece.py | display | pranavmodx/ChessX | 3 | python | def display(self, screen):
screen_obj = screen.blit(self.img, self.pos) | def display(self, screen):
screen_obj = screen.blit(self.img, self.pos)<|docstring|>Displays image on screen<|endoftext|> |
c466a80db6b5120bdc1a06a0271eb5dea5531374d1992adf668e012ba7b2059c | def set_pos(self, pos):
'Set initial position of piece'
self.pos = pos | Set initial position of piece | src/pieces/piece.py | set_pos | pranavmodx/ChessX | 3 | python | def set_pos(self, pos):
self.pos = pos | def set_pos(self, pos):
self.pos = pos<|docstring|>Set initial position of piece<|endoftext|> |
728e2535720ddf4df5fc50904f80c9bf84cfcc61a54167cb208ef126a3c76d8c | def move(self, pos):
'Move piece to required position'
self.pos = pos | Move piece to required position | src/pieces/piece.py | move | pranavmodx/ChessX | 3 | python | def move(self, pos):
self.pos = pos | def move(self, pos):
self.pos = pos<|docstring|>Move piece to required position<|endoftext|> |
5fafd414803ac4ac9baddab34e355ff9767d75cc850e39a3b5fec3d4dbcbcf7f | @ioflo.base.deeding.deedify(salt.utils.stringutils.to_str('SaltRaetMaintFork'), ioinits={'opts': salt.utils.stringutils.to_str('.salt.opts'), 'proc_mgr': salt.utils.stringutils.to_str('.salt.usr.proc_mgr')})
def maint_fork(self):
'\n For off the maintinence process from the master router process\n FloScript:\n\n do salt raet maint fork at enter\n '
self.proc_mgr.value.add_process(Maintenance, args=(self.opts.value,)) | For off the maintinence process from the master router process
FloScript:
do salt raet maint fork at enter | salt/daemons/flo/maint.py | maint_fork | kaelaworthen/salt | 12 | python | @ioflo.base.deeding.deedify(salt.utils.stringutils.to_str('SaltRaetMaintFork'), ioinits={'opts': salt.utils.stringutils.to_str('.salt.opts'), 'proc_mgr': salt.utils.stringutils.to_str('.salt.usr.proc_mgr')})
def maint_fork(self):
'\n For off the maintinence process from the master router process\n FloScript:\n\n do salt raet maint fork at enter\n '
self.proc_mgr.value.add_process(Maintenance, args=(self.opts.value,)) | @ioflo.base.deeding.deedify(salt.utils.stringutils.to_str('SaltRaetMaintFork'), ioinits={'opts': salt.utils.stringutils.to_str('.salt.opts'), 'proc_mgr': salt.utils.stringutils.to_str('.salt.usr.proc_mgr')})
def maint_fork(self):
'\n For off the maintinence process from the master router process\n FloScript:\n\n do salt raet maint fork at enter\n '
self.proc_mgr.value.add_process(Maintenance, args=(self.opts.value,))<|docstring|>For off the maintinence process from the master router process
FloScript:
do salt raet maint fork at enter<|endoftext|> |
0026237438c38e6afb9e353d8e1163ea02be540cff9d2c2b477aa71aec9ac092 | def run(self):
'\n Spin up a worker, do this in s multiprocess\n '
behaviors = ['salt.daemons.flo']
preloads = [(salt.utils.stringutils.to_str('.salt.opts'), dict(value=self.opts))]
console_logdir = self.opts.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, 'maintenance.log')
else:
consolepath = ''
ioflo.app.run.start(name='maintenance', period=float(self.opts['loop_interval']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['maintenance_floscript'], behaviors=behaviors, username='', password='', mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath) | Spin up a worker, do this in s multiprocess | salt/daemons/flo/maint.py | run | kaelaworthen/salt | 12 | python | def run(self):
'\n \n '
behaviors = ['salt.daemons.flo']
preloads = [(salt.utils.stringutils.to_str('.salt.opts'), dict(value=self.opts))]
console_logdir = self.opts.get('ioflo_console_logdir', )
if console_logdir:
consolepath = os.path.join(console_logdir, 'maintenance.log')
else:
consolepath =
ioflo.app.run.start(name='maintenance', period=float(self.opts['loop_interval']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['maintenance_floscript'], behaviors=behaviors, username=, password=, mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath) | def run(self):
'\n \n '
behaviors = ['salt.daemons.flo']
preloads = [(salt.utils.stringutils.to_str('.salt.opts'), dict(value=self.opts))]
console_logdir = self.opts.get('ioflo_console_logdir', )
if console_logdir:
consolepath = os.path.join(console_logdir, 'maintenance.log')
else:
consolepath =
ioflo.app.run.start(name='maintenance', period=float(self.opts['loop_interval']), stamp=0.0, real=self.opts['ioflo_realtime'], filepath=self.opts['maintenance_floscript'], behaviors=behaviors, username=, password=, mode=None, houses=None, metas=None, preloads=preloads, verbose=int(self.opts['ioflo_verbose']), consolepath=consolepath)<|docstring|>Spin up a worker, do this in s multiprocess<|endoftext|> |
5e6be12f9cc30acc6abd3fa1201b23dcd998a2fc376d4401d6694d64ba3b1a30 | def action(self):
'\n Set up the objects used in the maint process\n '
self.fileserver.value = salt.fileserver.Fileserver(self.opts.value)
self.runners.value = salt.loader.runner(self.opts.value)
self.ckminions.value = salt.utils.minions.CkMinions(self.opts.value)
self.pillargitfs.value = salt.daemons.masterapi.init_git_pillar(self.opts.value) | Set up the objects used in the maint process | salt/daemons/flo/maint.py | action | kaelaworthen/salt | 12 | python | def action(self):
'\n \n '
self.fileserver.value = salt.fileserver.Fileserver(self.opts.value)
self.runners.value = salt.loader.runner(self.opts.value)
self.ckminions.value = salt.utils.minions.CkMinions(self.opts.value)
self.pillargitfs.value = salt.daemons.masterapi.init_git_pillar(self.opts.value) | def action(self):
'\n \n '
self.fileserver.value = salt.fileserver.Fileserver(self.opts.value)
self.runners.value = salt.loader.runner(self.opts.value)
self.ckminions.value = salt.utils.minions.CkMinions(self.opts.value)
self.pillargitfs.value = salt.daemons.masterapi.init_git_pillar(self.opts.value)<|docstring|>Set up the objects used in the maint process<|endoftext|> |
bb52ffcb101995fccb8cbe6eafdfc1682c0dac094cb66c82044a060548054426 | def action(self):
'\n Clean!\n '
salt.daemons.masterapi.clean_fsbackend(self.opts.value) | Clean! | salt/daemons/flo/maint.py | action | kaelaworthen/salt | 12 | python | def action(self):
'\n \n '
salt.daemons.masterapi.clean_fsbackend(self.opts.value) | def action(self):
'\n \n '
salt.daemons.masterapi.clean_fsbackend(self.opts.value)<|docstring|>Clean!<|endoftext|> |
bdd10865a2c6758cfd5aab4c34016d83c39b49375146d4ca0f7359a72452e7c2 | def action(self):
'\n Clear out the old jobs cache\n '
salt.daemons.masterapi.clean_old_jobs(self.opts.value) | Clear out the old jobs cache | salt/daemons/flo/maint.py | action | kaelaworthen/salt | 12 | python | def action(self):
'\n \n '
salt.daemons.masterapi.clean_old_jobs(self.opts.value) | def action(self):
'\n \n '
salt.daemons.masterapi.clean_old_jobs(self.opts.value)<|docstring|>Clear out the old jobs cache<|endoftext|> |
cf4956717ee0c0cee0ae10a60a0750dde9e868e94d884357ea8061f5691334aa | def action(self):
'\n Update!\n '
for pillargit in self.pillargitfs.value:
pillargit.update()
salt.daemons.masterapi.fileserver_update(self.fileserver.value) | Update! | salt/daemons/flo/maint.py | action | kaelaworthen/salt | 12 | python | def action(self):
'\n \n '
for pillargit in self.pillargitfs.value:
pillargit.update()
salt.daemons.masterapi.fileserver_update(self.fileserver.value) | def action(self):
'\n \n '
for pillargit in self.pillargitfs.value:
pillargit.update()
salt.daemons.masterapi.fileserver_update(self.fileserver.value)<|docstring|>Update!<|endoftext|> |
424027b04ab4a4b3544cc5e7dfac890aad797981c1f05b7c29fa0e897b2881ab | def google_plus_profile_links(iterable, icon_path=''):
'\n mainfunc\n '
(yield '<div class="widget">')
(yield '<ul id="accounts">')
for (img, link, (sitename, filename, filepath, imgurl)) in iterable:
(yield '<li>')
(yield ('<a href=%s title=%s rel="me" target="_blank">' % (_repr(link['href']), _repr(link['title']))))
(yield ('<img src=%s alt=%s width="16" height="16"></img>' % (_repr(build_data_uri(filepath, 'image/png')), _repr(sitename))))
(yield '</a>')
(yield '</li>')
(yield '</ul>')
(yield '</div>') | mainfunc | get_accounts.py | google_plus_profile_links | westurner/westurner.github.io | 0 | python | def google_plus_profile_links(iterable, icon_path=):
'\n \n '
(yield '<div class="widget">')
(yield '<ul id="accounts">')
for (img, link, (sitename, filename, filepath, imgurl)) in iterable:
(yield '<li>')
(yield ('<a href=%s title=%s rel="me" target="_blank">' % (_repr(link['href']), _repr(link['title']))))
(yield ('<img src=%s alt=%s width="16" height="16"></img>' % (_repr(build_data_uri(filepath, 'image/png')), _repr(sitename))))
(yield '</a>')
(yield '</li>')
(yield '</ul>')
(yield '</div>') | def google_plus_profile_links(iterable, icon_path=):
'\n \n '
(yield '<div class="widget">')
(yield '<ul id="accounts">')
for (img, link, (sitename, filename, filepath, imgurl)) in iterable:
(yield '<li>')
(yield ('<a href=%s title=%s rel="me" target="_blank">' % (_repr(link['href']), _repr(link['title']))))
(yield ('<img src=%s alt=%s width="16" height="16"></img>' % (_repr(build_data_uri(filepath, 'image/png')), _repr(sitename))))
(yield '</a>')
(yield '</li>')
(yield '</ul>')
(yield '</div>')<|docstring|>mainfunc<|endoftext|> |
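The generator above embeds each account icon as a data URI via a `build_data_uri` helper that is not included in this record. A typical implementation, assumed here, base64-encodes the file and prefixes the MIME type; the comment at the end shows the usual way a generator of HTML fragments is consumed.

```python
import base64


def build_data_uri(filepath, mime_type="image/png"):
    """Read a file and return it as a data: URI (assumed behaviour of the helper above)."""
    with open(filepath, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("ascii")
    return f"data:{mime_type};base64,{encoded}"


# A generator of HTML fragments is typically materialised with str.join, e.g.
# html = "\n".join(google_plus_profile_links(accounts))
```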
923947e780bc8f4de16896649b7d2e469c84c36b394f4ea3e957b768e6d74ad2 | def reference2array(path):
'this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged\n (this will be used to normalize the raw hyperspectral image)\n Inputs:\n path = path to the raw file of reference\n\n Returns:\n image_array_all = hyperspectral reference image in array format\n gdalhyper = hyperspectral reference image\n pixelWidth = pixelWidth\n cols = number of cols of raw image\n rows = number of rows of raw image\n bands = number of bands of raw image\n\n\n :param hyperimg: spectral object\n :param bands: list of band centers\n :param path: string\n :return filname: string\n '
device += 1
if (os.path.isfile(path) == False):
fatal_error((str(path) + ' does not exist'))
gdalhyper = gdal.Open(path, GA_ReadOnly)
if (gdalhyper is None):
print(("Couldn't open this file: " + path))
sys.exit('Try again!')
else:
print(('%s opened successfully' % path))
print('Get image size')
cols = gdalhyper.RasterXSize
rows = gdalhyper.RasterYSize
bands = gdalhyper.RasterCount
print(('columns: %i' % cols))
print(('rows: %i' % rows))
print(('bands: %i' % bands))
print('Get georeference information')
geotransform = gdalhyper.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
print(('origin x: %i' % originX))
print(('origin y: %i' % originY))
print(('width: %2.2f' % pixelWidth))
print(('height: %2.2f' % pixelHeight))
print('Convert image to 2D array')
band = gdalhyper.GetRasterBand(1)
image_array = band.ReadAsArray(0, 0, cols, rows)
image_array_name = path
print(type(image_array))
print(image_array.shape)
output_list = []
for i in range(1, (bands + 1)):
band = gdalhyper.GetRasterBand(i)
image_array = band.ReadAsArray(0, 0, cols, rows)
for y in zip(*image_array):
avg_reflectance = (sum(y) / len(y))
output_list.append(avg_reflectance)
image_array_ave = np.reshape(output_list, (bands, cols))
print('Average image width')
print(image_array_ave.shape)
return (image_array_all, gdalhyper, cols, rows, bands) | this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged
(this will be used to normalize the raw hyperspectral image)
Inputs:
path = path to the raw file of reference
Returns:
image_array_all = hyperspectral reference image in array format
gdalhyper = hyperspectral reference image
pixelWidth = pixelWidth
cols = number of cols of raw image
rows = number of rows of raw image
bands = number of bands of raw image
:param hyperimg: spectral object
:param bands: list of band centers
:param path: string
:return filname: string | plantcv/hyperspectral/reference2array.py | reference2array | danforthcenter/plantcv-hyperspectral | 1 | python | def reference2array(path):
'this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged\n (this will be used to normalize the raw hyperspectral image)\n Inputs:\n path = path to the raw file of reference\n\n Returns:\n image_array_all = hyperspectral reference image in array format\n gdalhyper = hyperspectral reference image\n pixelWidth = pixelWidth\n cols = number of cols of raw image\n rows = number of rows of raw image\n bands = number of bands of raw image\n\n\n :param hyperimg: spectral object\n :param bands: list of band centers\n :param path: string\n :return filname: string\n '
device += 1
if (os.path.isfile(path) == False):
fatal_error((str(path) + ' does not exist'))
gdalhyper = gdal.Open(path, GA_ReadOnly)
if (gdalhyper is None):
print(("Couldn't open this file: " + path))
sys.exit('Try again!')
else:
print(('%s opened successfully' % path))
print('Get image size')
cols = gdalhyper.RasterXSize
rows = gdalhyper.RasterYSize
bands = gdalhyper.RasterCount
print(('columns: %i' % cols))
print(('rows: %i' % rows))
print(('bands: %i' % bands))
print('Get georeference information')
geotransform = gdalhyper.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
print(('origin x: %i' % originX))
print(('origin y: %i' % originY))
print(('width: %2.2f' % pixelWidth))
print(('height: %2.2f' % pixelHeight))
print('Convert image to 2D array')
band = gdalhyper.GetRasterBand(1)
image_array = band.ReadAsArray(0, 0, cols, rows)
image_array_name = path
print(type(image_array))
print(image_array.shape)
output_list = []
for i in range(1, (bands + 1)):
band = gdalhyper.GetRasterBand(i)
image_array = band.ReadAsArray(0, 0, cols, rows)
for y in zip(*image_array):
avg_reflectance = (sum(y) / len(y))
output_list.append(avg_reflectance)
image_array_ave = np.reshape(output_list, (bands, cols))
print('Average image width')
print(image_array_ave.shape)
return (image_array_all, gdalhyper, cols, rows, bands) | def reference2array(path):
'this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged\n (this will be used to normalize the raw hyperspectral image)\n Inputs:\n path = path to the raw file of reference\n\n Returns:\n image_array_all = hyperspectral reference image in array format\n gdalhyper = hyperspectral reference image\n pixelWidth = pixelWidth\n cols = number of cols of raw image\n rows = number of rows of raw image\n bands = number of bands of raw image\n\n\n :param hyperimg: spectral object\n :param bands: list of band centers\n :param path: string\n :return filname: string\n '
device += 1
if (os.path.isfile(path) == False):
fatal_error((str(path) + ' does not exist'))
gdalhyper = gdal.Open(path, GA_ReadOnly)
if (gdalhyper is None):
print(("Couldn't open this file: " + path))
sys.exit('Try again!')
else:
print(('%s opened successfully' % path))
print('Get image size')
cols = gdalhyper.RasterXSize
rows = gdalhyper.RasterYSize
bands = gdalhyper.RasterCount
print(('columns: %i' % cols))
print(('rows: %i' % rows))
print(('bands: %i' % bands))
print('Get georeference information')
geotransform = gdalhyper.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
print(('origin x: %i' % originX))
print(('origin y: %i' % originY))
print(('width: %2.2f' % pixelWidth))
print(('height: %2.2f' % pixelHeight))
print('Convert image to 2D array')
band = gdalhyper.GetRasterBand(1)
image_array = band.ReadAsArray(0, 0, cols, rows)
image_array_name = path
print(type(image_array))
print(image_array.shape)
output_list = []
for i in range(1, (bands + 1)):
band = gdalhyper.GetRasterBand(i)
image_array = band.ReadAsArray(0, 0, cols, rows)
for y in zip(*image_array):
avg_reflectance = (sum(y) / len(y))
output_list.append(avg_reflectance)
image_array_ave = np.reshape(output_list, (bands, cols))
print('Average image width')
print(image_array_ave.shape)
return (image_array_all, gdalhyper, cols, rows, bands)<|docstring|>this function allows you read in hyperspectral reference in raw format and returns it as array that is averaged
(this will be used to normalize the raw hyperspectral image)
Inputs:
path = path to the raw file of reference
Returns:
image_array_all = hyperspectral reference image in array format
gdalhyper = hyperspectral reference image
pixelWidth = pixelWidth
cols = number of cols of raw image
rows = number of rows of raw image
bands = number of bands of raw image
:param hyperimg: spectral object
:param bands: list of band centers
:param path: string
:return filname: string<|endoftext|> |
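The inner loop of `reference2array` averages each image column within every band (`sum(y) / len(y)` over the zipped columns), which yields the `(bands, cols)` array assigned to `image_array_ave`. With NumPy the same per-column averaging is a one-liner, as sketched below on synthetic data; reading the raw cube with GDAL is unchanged.

```python
import numpy as np

rng = np.random.default_rng(0)
bands, rows, cols = 5, 8, 10

# Synthetic stand-in for the reference cube read band by band from the raw file.
cube = rng.random((bands, rows, cols))

# Average each column within every band: shape (bands, cols),
# matching the image_array_ave built by the loop above.
image_array_ave = cube.mean(axis=1)
print(image_array_ave.shape)  # (5, 10)
```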
153690b3eae9fab807e01e5e31143a00808470201a0d7b94e353aec02554df2a | def main_scan():
'\n Parser from terminal with\n $ python2 bruker2nifti_scan -h\n $ python2 bruker2nifti_scan -i input_file_path -o output_file_path\n '
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_scan_folder', dest='pfo_input', type=str, required=True, help='Bruker scan folder.')
parser.add_argument('-o', '--output_scan_folder', dest='pfo_output', type=str, required=True, help='Output folder where the study will be saved.')
parser.add_argument('--fin_output', dest='fin_output', type=str, default=None)
parser.add_argument('-nifti_version', dest='nifti_version', type=int, default=1, help='Filename of the nifti output.')
parser.add_argument('-qform_code', dest='qform_code', type=int, default=2)
parser.add_argument('-sform_code', dest='sform_code', type=int, default=1)
parser.add_argument('-do_not_save_npy', dest='do_not_save_npy', action='store_true')
parser.add_argument('-do_not_save_human_readable', dest='do_not_save_human_readable', action='store_true')
parser.add_argument('-correct_visu_slope', dest='correct_visu_slope', action='store_true')
parser.add_argument('-correct_reco_slope', dest='correct_reco_slope', action='store_true')
parser.add_argument('-apply_matrix', dest='user_matrix', type=str, default=None)
parser.add_argument('-verbose', '-v', dest='verbose', type=int, default=1)
args = parser.parse_args()
bruconv = Bruker2Nifti(os.path.dirname(args.pfo_input), args.pfo_output)
bruconv.nifti_version = args.nifti_version
bruconv.qform_code = args.qform_code
bruconv.sform_code = args.sform_code
bruconv.save_npy = (not args.do_not_save_npy)
bruconv.save_human_readable = (not args.do_not_save_human_readable)
bruconv.correct_visu_slope = args.correct_visu_slope
bruconv.correct_reco_slope = args.correct_reco_slope
bruconv.user_matrix = args.user_matrix
bruconv.verbose = args.verbose
if (parser.add_argument > 0):
print('\nConverter parameters: ')
print('-------------------------------------------------------- ')
print('Study Folder : {}'.format(os.path.dirname(args.pfo_input)))
print('Scan to convert : {}'.format(os.path.basename(args.pfo_input)))
print('List of scans : {}'.format(bruconv.scans_list))
print('Output NifTi version : {}'.format(bruconv.nifti_version))
print('Output NifTi q-form : {}'.format(bruconv.qform_code))
print('Output NifTi s-form : {}'.format(bruconv.sform_code))
print('Save npy : {}'.format(bruconv.save_npy))
print('Save human readable : {}'.format(bruconv.save_human_readable))
print('Correct the visu_slope : {}'.format(bruconv.correct_visu_slope))
print('Correct the reco_slope : {}'.format(bruconv.correct_reco_slope))
print('Apply matrix : {}'.format(bruconv.user_matrix))
print('-------------------------------------------------------- ')
bruconv.convert_scan(args.pfo_input, args.pfo_output, nifti_file_name=args.fin_output, create_output_folder_if_not_exists=True) | Parser from terminal with
$ python2 bruker2nifti_scan -h
$ python2 bruker2nifti_scan -i input_file_path -o output_file_path | bruker2nifti/parsers/bruker2nii_scan.py | main_scan | neuroanatomy/bruker2nifti | 0 | python | def main_scan():
'\n Parser from terminal with\n $ python2 bruker2nifti_scan -h\n $ python2 bruker2nifti_scan -i input_file_path -o output_file_path\n '
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_scan_folder', dest='pfo_input', type=str, required=True, help='Bruker scan folder.')
parser.add_argument('-o', '--output_scan_folder', dest='pfo_output', type=str, required=True, help='Output folder where the study will be saved.')
parser.add_argument('--fin_output', dest='fin_output', type=str, default=None)
parser.add_argument('-nifti_version', dest='nifti_version', type=int, default=1, help='Filename of the nifti output.')
parser.add_argument('-qform_code', dest='qform_code', type=int, default=2)
parser.add_argument('-sform_code', dest='sform_code', type=int, default=1)
parser.add_argument('-do_not_save_npy', dest='do_not_save_npy', action='store_true')
parser.add_argument('-do_not_save_human_readable', dest='do_not_save_human_readable', action='store_true')
parser.add_argument('-correct_visu_slope', dest='correct_visu_slope', action='store_true')
parser.add_argument('-correct_reco_slope', dest='correct_reco_slope', action='store_true')
parser.add_argument('-apply_matrix', dest='user_matrix', type=str, default=None)
parser.add_argument('-verbose', '-v', dest='verbose', type=int, default=1)
args = parser.parse_args()
bruconv = Bruker2Nifti(os.path.dirname(args.pfo_input), args.pfo_output)
bruconv.nifti_version = args.nifti_version
bruconv.qform_code = args.qform_code
bruconv.sform_code = args.sform_code
bruconv.save_npy = (not args.do_not_save_npy)
bruconv.save_human_readable = (not args.do_not_save_human_readable)
bruconv.correct_visu_slope = args.correct_visu_slope
bruconv.correct_reco_slope = args.correct_reco_slope
bruconv.user_matrix = args.user_matrix
bruconv.verbose = args.verbose
if (parser.add_argument > 0):
print('\nConverter parameters: ')
print('-------------------------------------------------------- ')
print('Study Folder : {}'.format(os.path.dirname(args.pfo_input)))
print('Scan to convert : {}'.format(os.path.basename(args.pfo_input)))
print('List of scans : {}'.format(bruconv.scans_list))
print('Output NifTi version : {}'.format(bruconv.nifti_version))
print('Output NifTi q-form : {}'.format(bruconv.qform_code))
print('Output NifTi s-form : {}'.format(bruconv.sform_code))
print('Save npy : {}'.format(bruconv.save_npy))
print('Save human readable : {}'.format(bruconv.save_human_readable))
print('Correct the visu_slope : {}'.format(bruconv.correct_visu_slope))
print('Correct the reco_slope : {}'.format(bruconv.correct_reco_slope))
print('Apply matrix : {}'.format(bruconv.user_matrix))
print('-------------------------------------------------------- ')
bruconv.convert_scan(args.pfo_input, args.pfo_output, nifti_file_name=args.fin_output, create_output_folder_if_not_exists=True) | def main_scan():
'\n Parser from terminal with\n $ python2 bruker2nifti_scan -h\n $ python2 bruker2nifti_scan -i input_file_path -o output_file_path\n '
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_scan_folder', dest='pfo_input', type=str, required=True, help='Bruker scan folder.')
parser.add_argument('-o', '--output_scan_folder', dest='pfo_output', type=str, required=True, help='Output folder where the study will be saved.')
parser.add_argument('--fin_output', dest='fin_output', type=str, default=None)
parser.add_argument('-nifti_version', dest='nifti_version', type=int, default=1, help='Filename of the nifti output.')
parser.add_argument('-qform_code', dest='qform_code', type=int, default=2)
parser.add_argument('-sform_code', dest='sform_code', type=int, default=1)
parser.add_argument('-do_not_save_npy', dest='do_not_save_npy', action='store_true')
parser.add_argument('-do_not_save_human_readable', dest='do_not_save_human_readable', action='store_true')
parser.add_argument('-correct_visu_slope', dest='correct_visu_slope', action='store_true')
parser.add_argument('-correct_reco_slope', dest='correct_reco_slope', action='store_true')
parser.add_argument('-apply_matrix', dest='user_matrix', type=str, default=None)
parser.add_argument('-verbose', '-v', dest='verbose', type=int, default=1)
args = parser.parse_args()
bruconv = Bruker2Nifti(os.path.dirname(args.pfo_input), args.pfo_output)
bruconv.nifti_version = args.nifti_version
bruconv.qform_code = args.qform_code
bruconv.sform_code = args.sform_code
bruconv.save_npy = (not args.do_not_save_npy)
bruconv.save_human_readable = (not args.do_not_save_human_readable)
bruconv.correct_visu_slope = args.correct_visu_slope
bruconv.correct_reco_slope = args.correct_reco_slope
bruconv.user_matrix = args.user_matrix
bruconv.verbose = args.verbose
if (parser.add_argument > 0):
print('\nConverter parameters: ')
print('-------------------------------------------------------- ')
print('Study Folder : {}'.format(os.path.dirname(args.pfo_input)))
print('Scan to convert : {}'.format(os.path.basename(args.pfo_input)))
print('List of scans : {}'.format(bruconv.scans_list))
print('Output NifTi version : {}'.format(bruconv.nifti_version))
print('Output NifTi q-form : {}'.format(bruconv.qform_code))
print('Output NifTi s-form : {}'.format(bruconv.sform_code))
print('Save npy : {}'.format(bruconv.save_npy))
print('Save human readable : {}'.format(bruconv.save_human_readable))
print('Correct the visu_slope : {}'.format(bruconv.correct_visu_slope))
print('Correct the reco_slope : {}'.format(bruconv.correct_reco_slope))
print('Apply matrix : {}'.format(bruconv.user_matrix))
print('-------------------------------------------------------- ')
bruconv.convert_scan(args.pfo_input, args.pfo_output, nifti_file_name=args.fin_output, create_output_folder_if_not_exists=True)<|docstring|>Parser from terminal with
$ python2 bruker2nifti_scan -h
$ python2 bruker2nifti_scan -i input_file_path -o output_file_path<|endoftext|> |
c1d48b5c1acf408253ef538b8f21b1d468fc58c94393d260d25d1e34c85d4474 | def load_tags(self):
'Loads dictionary of tags for further use in BW2'
filename = 'tags.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of tags could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
data = csv_list
dict_tags = {}
for row in data:
(name, tag) = row
dict_tags[name] = tag
return dict_tags | Loads dictionary of tags for further use in BW2 | carculator/export.py | load_tags | SimonVoelker/carculator | 0 | python | def load_tags(self):
filename = 'tags.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of tags could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
data = csv_list
dict_tags = {}
for row in data:
(name, tag) = row
dict_tags[name] = tag
return dict_tags | def load_tags(self):
filename = 'tags.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of tags could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
data = csv_list
dict_tags = {}
for row in data:
(name, tag) = row
dict_tags[name] = tag
return dict_tags<|docstring|>Loads dictionary of tags for further use in BW2<|endoftext|> |
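`load_tags` expects `tags.csv` to hold two semicolon-separated columns (name;tag) and turns them into a dict. The hand-rolled split above can also be written with the standard `csv` module; the rows in this sketch are hypothetical, not taken from carculator's data file.

```python
import csv
import io

# Hypothetical tags.csv content: one "name;tag" pair per row.
example = io.StringIO("glider production;glider\nmarket for electricity;energy chain\n")

dict_tags = {}
for name, tag in csv.reader(example, delimiter=";"):
    dict_tags[name.strip()] = tag.strip()

print(dict_tags)
```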
a8f48a7e689fca6215efc4353460f53ad34516558dc162f577566b481fb36caf | def load_mapping_36_to_uvek(self):
'Load mapping dictionary between ecoinvent 3.6 and UVEK'
filename = 'uvek_mapping.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of activities flows match between ecoinvent 3.6 and UVEK could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_uvek = {}
for row in data:
(name, ref_prod, unit, location, uvek_name, uvek_ref_prod, uvek_unit, uvek_loc) = row
dict_uvek[(name, ref_prod, unit, location)] = (uvek_name, uvek_ref_prod, uvek_unit, uvek_loc)
return dict_uvek | Load mapping dictionary between ecoinvent 3.6 and UVEK | carculator/export.py | load_mapping_36_to_uvek | SimonVoelker/carculator | 0 | python | def load_mapping_36_to_uvek(self):
filename = 'uvek_mapping.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of activities flows match between ecoinvent 3.6 and UVEK could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_uvek = {}
for row in data:
(name, ref_prod, unit, location, uvek_name, uvek_ref_prod, uvek_unit, uvek_loc) = row
dict_uvek[(name, ref_prod, unit, location)] = (uvek_name, uvek_ref_prod, uvek_unit, uvek_loc)
return dict_uvek | def load_mapping_36_to_uvek(self):
filename = 'uvek_mapping.csv'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of activities flows match between ecoinvent 3.6 and UVEK could not be found.')
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_uvek = {}
for row in data:
(name, ref_prod, unit, location, uvek_name, uvek_ref_prod, uvek_unit, uvek_loc) = row
dict_uvek[(name, ref_prod, unit, location)] = (uvek_name, uvek_ref_prod, uvek_unit, uvek_loc)
return dict_uvek<|docstring|>Load mapping dictionary between ecoinvent 3.6 and UVEK<|endoftext|> |
271a1aadcf9f18fa2e4d7cf3dab5ed84bf07752d546e40276e0d04b6a3c219eb | def write_lci(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return the inventory as a dictionary\n If if there several values for one exchange, uncertainty information is generated.\n If `presamples` is True, returns the inventory as well as a `presamples` matrix.\n If `presamples` is False, returns the inventory with characterized uncertainty information.\n If `ecoinvent_compatibility` is True, the inventory is made compatible with ecoinvent. If False,\n the inventory is compatible with the REMIND-ecoinvent hybrid database output of the `rmnd_lca` library.\n\n :returns: a dictionary that contains all the exchanges\n :rtype: dict\n '
activities_to_be_removed = ['algae cultivation | algae broth production', 'algae harvesting| dry algae production', 'transport, pipeline, supercritical CO2, 200km w/o recompression', 'Ethanol from maize starch', 'Natural Gas provision (at medium pressure grid) {RER}, EU mix', 'woodchips from forestry residues', 'Ethanol from wheat straw pellets', 'straw pellets', 'Biodiesel from cooking oil', 'Straw bales | baling of straw', 'CO2 storage/natural gas, post, 200km pipeline, storage 1000m/2025', 'drilling, deep borehole/m', 'Sugar beet cultivation {RER} | sugar beet production Europe | Alloc Rec, U', 'Refined Waste Cooking Oil {RER} | Refining of waste cooking oil Europe | Alloc Rec, U', 'Ethanol from forest residues', 'Ethanol from sugarbeet', 'pipeline, supercritical CO2/km', 'Biodiesel from algae', 'Maize cultivation, drying and storage {RER} | Maize production Europe | Alloc Rec, U', 'Fischer Tropsch reactor and upgrading plant, construction', 'Walls and foundations, for hydrogen refuelling station', 'container, with pipes and fittings, for diaphragm compressor', 'RWGS tank construction', 'storage module, high pressure, at fuelling station', 'pumps, carbon dioxide capture process', 'PEM electrolyzer, Operation and Maintenance', 'heat exchanger, carbon dioxide capture process', 'biogas upgrading - sewage sludge - amine scrubbing - best', 'Hydrogen refuelling station, SMR', 'Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station', 'transformer and rectifier unit, for electrolyzer', 'PEM electrolyzer, ACDC Converter', 'carbon dioxide, captured from atmosphere', 'PEM electrolyzer, Balance of Plant', 'Sabatier reaction methanation unit', 'PEM electrolyzer, Stack', 'hot water tank, carbon dioxide capture process', 'cooling unit, carbon dioxide capture process', 'diaphragm compressor module, high pressure', 'carbon dioxide capture system', 'Hydrogen dispenser, for gaseous hydrogen', 'diaphragms, for diaphragm compressor', 'MTG production facility, construction', 'Disposal, hydrogen fuelling station', 'production of 2 wt-% potassium iodide solution', 'production of nickle-based catalyst for methanation', 'wiring and tubing, carbon dioxide capture process', 'control panel, carbon dioxide capture process', 'adsorption and desorption unit, carbon dioxide capture process', 'Buffer tank', 'frequency converter, for diaphragm compressor', 'Hydrogen, gaseous, 30 bar, from hard coal gasification and reforming, at coal gasification plant', 'Methanol distillation', 'CO2 storage/at H2 production plant, pre, pipeline 200km, storage 1000m', 'Syngas, RWGS, Production', 'softwood forestry, mixed species, sustainable forest management, CF = -1', 'hardwood forestry, mixed species, sustainable forest management, CF = -1', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass with CCS, at gasification plant', 'market for wood chips, wet, measured as dry mass, CF = -1', 'Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station', 'Hydrogen, gaseous, 25 bar, from electrolysis', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station', 'SMR BM, HT+LT, + CCS (MDEA), 98 (average), digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 25 bar', 'SMR BM, HT+LT, with digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station', 'Hydrogen, 
gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station', 'Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 700 bar', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass, at gasification plant', 'Methanol Synthesis', 'Diesel production, synthetic, Fischer Tropsch process', 'Gasoline production, synthetic, from methanol']
uvek_activities_to_remove = ['market for activated carbon, granular', 'market for iodine', 'market for manganese sulfate', 'market for molybdenum trioxide', 'market for nickel sulfate', 'market for soda ash, light, crystalline, heptahydrate']
ei35_activities_to_remove = ['latex production']
uvek_multiplication_factors = {'Steam, for chemical processes, at plant': (1 / 2.257), 'Natural gas, from high pressure network (1-5 bar), at service station': 0.842, 'Disposal, passenger car': (1 / 1600)}
list_act = []
if presamples:
presamples_matrix = []
non_zeroes = np.nonzero(self.array[(0, :, :)])
(u, c) = np.unique(non_zeroes[1], return_counts=True)
dup = u[(c > 1)]
coords = np.column_stack((non_zeroes[0][np.isin(non_zeroes[1], dup)], non_zeroes[1][np.isin(non_zeroes[1], dup)]))
bar = pyprind.ProgBar(len(dup))
for d in dup:
bar.update(item_id=d)
list_exc = []
for (row, col) in coords[(coords[(:, 1)] == d)]:
tuple_output = self.indices[col]
tuple_input = self.indices[row]
mult_factor = 1
if ((ecoinvent_compatibility == False) and (tuple_output[0] in activities_to_be_removed)):
break
if (ecoinvent_compatibility == False):
tuple_output = self.map_ecoinvent_remind.get(tuple_output, tuple_output)
tuple_input = self.map_ecoinvent_remind.get(tuple_input, tuple_input)
if (ecoinvent_compatibility == True):
tuple_output = self.map_remind_ecoinvent.get(tuple_output, tuple_output)
tuple_input = self.map_remind_ecoinvent.get(tuple_input, tuple_input)
if (ecoinvent_version == '3.5'):
tuple_output = self.map_36_to_35.get(tuple_output, tuple_output)
tuple_input = self.map_36_to_35.get(tuple_input, tuple_input)
if (tuple_output[0] in ei35_activities_to_remove):
continue
if (tuple_input[0] in ei35_activities_to_remove):
continue
if (ecoinvent_version == 'uvek'):
tuple_output = self.map_36_to_uvek.get(tuple_output, tuple_output)
if (tuple_input[0] in uvek_activities_to_remove):
continue
else:
tuple_input = self.map_36_to_uvek.get(tuple_input, tuple_input)
if (tuple_input[0] in uvek_multiplication_factors):
mult_factor = uvek_multiplication_factors[tuple_input[0]]
if (len(self.array[(:, row, col)]) == 1):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif np.all(np.isclose(self.array[(:, row, col)], self.array[(0, row, col)])):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif (presamples == True):
amount = (np.median(self.array[(:, row, col)]) * mult_factor)
uncertainty = [('uncertainty type', 1)]
if (len(tuple_input) > 3):
type_exc = 'technosphere'
else:
type_exc = 'biosphere'
presamples_matrix.append(((self.array[(:, row, col)] * (- 1)), [(tuple_input, tuple_output, type_exc)], type_exc))
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_input[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
if (tuple_output == tuple_input):
list_exc.append({'name': tuple_output[0], 'database': self.db_name, 'amount': amount, 'unit': tuple_output[2], 'type': 'production', 'location': tuple_output[1], 'reference product': tuple_output[3]})
list_exc[(- 1)].update(uncertainty)
elif (len(tuple_input) > 3):
list_exc.append({'name': tuple_input[0], 'database': self.db_name, 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'technosphere', 'location': tuple_input[1], 'reference product': tuple_input[3], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
list_exc.append({'name': tuple_input[0], 'database': 'biosphere3', 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'biosphere', 'categories': tuple_input[1], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_output[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
list_act.append({'production amount': 1, 'database': self.db_name, 'name': tuple_output[0], 'unit': tuple_output[2], 'location': tuple_output[1], 'exchanges': list_exc, 'reference product': tuple_output[3], 'type': 'process', 'code': str(uuid.uuid1()), 'tag': tag})
if presamples:
return (list_act, presamples_matrix)
else:
return list_act | Return the inventory as a dictionary
If there are several values for one exchange, uncertainty information is generated.
If `presamples` is True, returns the inventory as well as a `presamples` matrix.
If `presamples` is False, returns the inventory with characterized uncertainty information.
If `ecoinvent_compatibility` is True, the inventory is made compatible with ecoinvent. If False,
the inventory is compatible with the REMIND-ecoinvent hybrid database output of the `rmnd_lca` library.
:returns: a dictionary that contains all the exchanges
:rtype: dict | carculator/export.py | write_lci | SimonVoelker/carculator | 0 | python | def write_lci(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return the inventory as a dictionary\n If there are several values for one exchange, uncertainty information is generated.\n If `presamples` is True, returns the inventory as well as a `presamples` matrix.\n If `presamples` is False, returns the inventory with characterized uncertainty information.\n If `ecoinvent_compatibility` is True, the inventory is made compatible with ecoinvent. If False,\n the inventory is compatible with the REMIND-ecoinvent hybrid database output of the `rmnd_lca` library.\n\n :returns: a dictionary that contains all the exchanges\n :rtype: dict\n '
activities_to_be_removed = ['algae cultivation | algae broth production', 'algae harvesting| dry algae production', 'transport, pipeline, supercritical CO2, 200km w/o recompression', 'Ethanol from maize starch', 'Natural Gas provision (at medium pressure grid) {RER}, EU mix', 'woodchips from forestry residues', 'Ethanol from wheat straw pellets', 'straw pellets', 'Biodiesel from cooking oil', 'Straw bales | baling of straw', 'CO2 storage/natural gas, post, 200km pipeline, storage 1000m/2025', 'drilling, deep borehole/m', 'Sugar beet cultivation {RER} | sugar beet production Europe | Alloc Rec, U', 'Refined Waste Cooking Oil {RER} | Refining of waste cooking oil Europe | Alloc Rec, U', 'Ethanol from forest residues', 'Ethanol from sugarbeet', 'pipeline, supercritical CO2/km', 'Biodiesel from algae', 'Maize cultivation, drying and storage {RER} | Maize production Europe | Alloc Rec, U', 'Fischer Tropsch reactor and upgrading plant, construction', 'Walls and foundations, for hydrogen refuelling station', 'container, with pipes and fittings, for diaphragm compressor', 'RWGS tank construction', 'storage module, high pressure, at fuelling station', 'pumps, carbon dioxide capture process', 'PEM electrolyzer, Operation and Maintenance', 'heat exchanger, carbon dioxide capture process', 'biogas upgrading - sewage sludge - amine scrubbing - best', 'Hydrogen refuelling station, SMR', 'Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station', 'transformer and rectifier unit, for electrolyzer', 'PEM electrolyzer, ACDC Converter', 'carbon dioxide, captured from atmosphere', 'PEM electrolyzer, Balance of Plant', 'Sabatier reaction methanation unit', 'PEM electrolyzer, Stack', 'hot water tank, carbon dioxide capture process', 'cooling unit, carbon dioxide capture process', 'diaphragm compressor module, high pressure', 'carbon dioxide capture system', 'Hydrogen dispenser, for gaseous hydrogen', 'diaphragms, for diaphragm compressor', 'MTG production facility, construction', 'Disposal, hydrogen fuelling station', 'production of 2 wt-% potassium iodide solution', 'production of nickle-based catalyst for methanation', 'wiring and tubing, carbon dioxide capture process', 'control panel, carbon dioxide capture process', 'adsorption and desorption unit, carbon dioxide capture process', 'Buffer tank', 'frequency converter, for diaphragm compressor', 'Hydrogen, gaseous, 30 bar, from hard coal gasification and reforming, at coal gasification plant', 'Methanol distillation', 'CO2 storage/at H2 production plant, pre, pipeline 200km, storage 1000m', 'Syngas, RWGS, Production', 'softwood forestry, mixed species, sustainable forest management, CF = -1', 'hardwood forestry, mixed species, sustainable forest management, CF = -1', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass with CCS, at gasification plant', 'market for wood chips, wet, measured as dry mass, CF = -1', 'Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station', 'Hydrogen, gaseous, 25 bar, from electrolysis', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station', 'SMR BM, HT+LT, + CCS (MDEA), 98 (average), digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 25 bar', 'SMR BM, HT+LT, with digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station', 'Hydrogen, 
gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station', 'Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 700 bar', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass, at gasification plant', 'Methanol Synthesis', 'Diesel production, synthetic, Fischer Tropsch process', 'Gasoline production, synthetic, from methanol']
uvek_activities_to_remove = ['market for activated carbon, granular', 'market for iodine', 'market for manganese sulfate', 'market for molybdenum trioxide', 'market for nickel sulfate', 'market for soda ash, light, crystalline, heptahydrate']
ei35_activities_to_remove = ['latex production']
uvek_multiplication_factors = {'Steam, for chemical processes, at plant': (1 / 2.257), 'Natural gas, from high pressure network (1-5 bar), at service station': 0.842, 'Disposal, passenger car': (1 / 1600)}
list_act = []
if presamples:
presamples_matrix = []
non_zeroes = np.nonzero(self.array[(0, :, :)])
(u, c) = np.unique(non_zeroes[1], return_counts=True)
dup = u[(c > 1)]
coords = np.column_stack((non_zeroes[0][np.isin(non_zeroes[1], dup)], non_zeroes[1][np.isin(non_zeroes[1], dup)]))
bar = pyprind.ProgBar(len(dup))
for d in dup:
bar.update(item_id=d)
list_exc = []
for (row, col) in coords[(coords[(:, 1)] == d)]:
tuple_output = self.indices[col]
tuple_input = self.indices[row]
mult_factor = 1
if ((ecoinvent_compatibility == False) and (tuple_output[0] in activities_to_be_removed)):
break
if (ecoinvent_compatibility == False):
tuple_output = self.map_ecoinvent_remind.get(tuple_output, tuple_output)
tuple_input = self.map_ecoinvent_remind.get(tuple_input, tuple_input)
if (ecoinvent_compatibility == True):
tuple_output = self.map_remind_ecoinvent.get(tuple_output, tuple_output)
tuple_input = self.map_remind_ecoinvent.get(tuple_input, tuple_input)
if (ecoinvent_version == '3.5'):
tuple_output = self.map_36_to_35.get(tuple_output, tuple_output)
tuple_input = self.map_36_to_35.get(tuple_input, tuple_input)
if (tuple_output[0] in ei35_activities_to_remove):
continue
if (tuple_input[0] in ei35_activities_to_remove):
continue
if (ecoinvent_version == 'uvek'):
tuple_output = self.map_36_to_uvek.get(tuple_output, tuple_output)
if (tuple_input[0] in uvek_activities_to_remove):
continue
else:
tuple_input = self.map_36_to_uvek.get(tuple_input, tuple_input)
if (tuple_input[0] in uvek_multiplication_factors):
mult_factor = uvek_multiplication_factors[tuple_input[0]]
if (len(self.array[(:, row, col)]) == 1):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif np.all(np.isclose(self.array[(:, row, col)], self.array[(0, row, col)])):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif (presamples == True):
amount = (np.median(self.array[(:, row, col)]) * mult_factor)
uncertainty = [('uncertainty type', 1)]
if (len(tuple_input) > 3):
type_exc = 'technosphere'
else:
type_exc = 'biosphere'
presamples_matrix.append(((self.array[(:, row, col)] * (- 1)), [(tuple_input, tuple_output, type_exc)], type_exc))
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_input[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
if (tuple_output == tuple_input):
list_exc.append({'name': tuple_output[0], 'database': self.db_name, 'amount': amount, 'unit': tuple_output[2], 'type': 'production', 'location': tuple_output[1], 'reference product': tuple_output[3]})
list_exc[(- 1)].update(uncertainty)
elif (len(tuple_input) > 3):
list_exc.append({'name': tuple_input[0], 'database': self.db_name, 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'technosphere', 'location': tuple_input[1], 'reference product': tuple_input[3], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
list_exc.append({'name': tuple_input[0], 'database': 'biosphere3', 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'biosphere', 'categories': tuple_input[1], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_output[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
list_act.append({'production amount': 1, 'database': self.db_name, 'name': tuple_output[0], 'unit': tuple_output[2], 'location': tuple_output[1], 'exchanges': list_exc, 'reference product': tuple_output[3], 'type': 'process', 'code': str(uuid.uuid1()), 'tag': tag})
if presamples:
return (list_act, presamples_matrix)
else:
return list_act | def write_lci(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return the inventory as a dictionary\n If there are several values for one exchange, uncertainty information is generated.\n If `presamples` is True, returns the inventory as well as a `presamples` matrix.\n If `presamples` is False, returns the inventory with characterized uncertainty information.\n If `ecoinvent_compatibility` is True, the inventory is made compatible with ecoinvent. If False,\n the inventory is compatible with the REMIND-ecoinvent hybrid database output of the `rmnd_lca` library.\n\n :returns: a dictionary that contains all the exchanges\n :rtype: dict\n '
activities_to_be_removed = ['algae cultivation | algae broth production', 'algae harvesting| dry algae production', 'transport, pipeline, supercritical CO2, 200km w/o recompression', 'Ethanol from maize starch', 'Natural Gas provision (at medium pressure grid) {RER}, EU mix', 'woodchips from forestry residues', 'Ethanol from wheat straw pellets', 'straw pellets', 'Biodiesel from cooking oil', 'Straw bales | baling of straw', 'CO2 storage/natural gas, post, 200km pipeline, storage 1000m/2025', 'drilling, deep borehole/m', 'Sugar beet cultivation {RER} | sugar beet production Europe | Alloc Rec, U', 'Refined Waste Cooking Oil {RER} | Refining of waste cooking oil Europe | Alloc Rec, U', 'Ethanol from forest residues', 'Ethanol from sugarbeet', 'pipeline, supercritical CO2/km', 'Biodiesel from algae', 'Maize cultivation, drying and storage {RER} | Maize production Europe | Alloc Rec, U', 'Fischer Tropsch reactor and upgrading plant, construction', 'Walls and foundations, for hydrogen refuelling station', 'container, with pipes and fittings, for diaphragm compressor', 'RWGS tank construction', 'storage module, high pressure, at fuelling station', 'pumps, carbon dioxide capture process', 'PEM electrolyzer, Operation and Maintenance', 'heat exchanger, carbon dioxide capture process', 'biogas upgrading - sewage sludge - amine scrubbing - best', 'Hydrogen refuelling station, SMR', 'Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station', 'transformer and rectifier unit, for electrolyzer', 'PEM electrolyzer, ACDC Converter', 'carbon dioxide, captured from atmosphere', 'PEM electrolyzer, Balance of Plant', 'Sabatier reaction methanation unit', 'PEM electrolyzer, Stack', 'hot water tank, carbon dioxide capture process', 'cooling unit, carbon dioxide capture process', 'diaphragm compressor module, high pressure', 'carbon dioxide capture system', 'Hydrogen dispenser, for gaseous hydrogen', 'diaphragms, for diaphragm compressor', 'MTG production facility, construction', 'Disposal, hydrogen fuelling station', 'production of 2 wt-% potassium iodide solution', 'production of nickle-based catalyst for methanation', 'wiring and tubing, carbon dioxide capture process', 'control panel, carbon dioxide capture process', 'adsorption and desorption unit, carbon dioxide capture process', 'Buffer tank', 'frequency converter, for diaphragm compressor', 'Hydrogen, gaseous, 30 bar, from hard coal gasification and reforming, at coal gasification plant', 'Methanol distillation', 'CO2 storage/at H2 production plant, pre, pipeline 200km, storage 1000m', 'Syngas, RWGS, Production', 'softwood forestry, mixed species, sustainable forest management, CF = -1', 'hardwood forestry, mixed species, sustainable forest management, CF = -1', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass with CCS, at gasification plant', 'market for wood chips, wet, measured as dry mass, CF = -1', 'Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station', 'Hydrogen, gaseous, 25 bar, from electrolysis', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station', 'SMR BM, HT+LT, + CCS (MDEA), 98 (average), digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 25 bar', 'SMR BM, HT+LT, with digestate incineration, 26 bar', 'Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station', 'Hydrogen, 
gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station', 'Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station', 'SMR NG + CCS (MDEA), 98 (average), 700 bar', 'Hydrogen, gaseous, 25 bar, from dual fluidised bed gasification of woody biomass, at gasification plant', 'Methanol Synthesis', 'Diesel production, synthetic, Fischer Tropsch process', 'Gasoline production, synthetic, from methanol']
uvek_activities_to_remove = ['market for activated carbon, granular', 'market for iodine', 'market for manganese sulfate', 'market for molybdenum trioxide', 'market for nickel sulfate', 'market for soda ash, light, crystalline, heptahydrate']
ei35_activities_to_remove = ['latex production']
uvek_multiplication_factors = {'Steam, for chemical processes, at plant': (1 / 2.257), 'Natural gas, from high pressure network (1-5 bar), at service station': 0.842, 'Disposal, passenger car': (1 / 1600)}
list_act = []
if presamples:
presamples_matrix = []
non_zeroes = np.nonzero(self.array[(0, :, :)])
(u, c) = np.unique(non_zeroes[1], return_counts=True)
dup = u[(c > 1)]
coords = np.column_stack((non_zeroes[0][np.isin(non_zeroes[1], dup)], non_zeroes[1][np.isin(non_zeroes[1], dup)]))
bar = pyprind.ProgBar(len(dup))
for d in dup:
bar.update(item_id=d)
list_exc = []
for (row, col) in coords[(coords[(:, 1)] == d)]:
tuple_output = self.indices[col]
tuple_input = self.indices[row]
mult_factor = 1
if ((ecoinvent_compatibility == False) and (tuple_output[0] in activities_to_be_removed)):
break
if (ecoinvent_compatibility == False):
tuple_output = self.map_ecoinvent_remind.get(tuple_output, tuple_output)
tuple_input = self.map_ecoinvent_remind.get(tuple_input, tuple_input)
if (ecoinvent_compatibility == True):
tuple_output = self.map_remind_ecoinvent.get(tuple_output, tuple_output)
tuple_input = self.map_remind_ecoinvent.get(tuple_input, tuple_input)
if (ecoinvent_version == '3.5'):
tuple_output = self.map_36_to_35.get(tuple_output, tuple_output)
tuple_input = self.map_36_to_35.get(tuple_input, tuple_input)
if (tuple_output[0] in ei35_activities_to_remove):
continue
if (tuple_input[0] in ei35_activities_to_remove):
continue
if (ecoinvent_version == 'uvek'):
tuple_output = self.map_36_to_uvek.get(tuple_output, tuple_output)
if (tuple_input[0] in uvek_activities_to_remove):
continue
else:
tuple_input = self.map_36_to_uvek.get(tuple_input, tuple_input)
if (tuple_input[0] in uvek_multiplication_factors):
mult_factor = uvek_multiplication_factors[tuple_input[0]]
if (len(self.array[(:, row, col)]) == 1):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif np.all(np.isclose(self.array[(:, row, col)], self.array[(0, row, col)])):
amount = (self.array[(0, row, col)] * mult_factor)
uncertainty = [('uncertainty type', 1)]
elif (presamples == True):
amount = (np.median(self.array[(:, row, col)]) * mult_factor)
uncertainty = [('uncertainty type', 1)]
if (len(tuple_input) > 3):
type_exc = 'technosphere'
else:
type_exc = 'biosphere'
presamples_matrix.append(((self.array[(:, row, col)] * (- 1)), [(tuple_input, tuple_output, type_exc)], type_exc))
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_input[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
if (tuple_output == tuple_input):
list_exc.append({'name': tuple_output[0], 'database': self.db_name, 'amount': amount, 'unit': tuple_output[2], 'type': 'production', 'location': tuple_output[1], 'reference product': tuple_output[3]})
list_exc[(- 1)].update(uncertainty)
elif (len(tuple_input) > 3):
list_exc.append({'name': tuple_input[0], 'database': self.db_name, 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'technosphere', 'location': tuple_input[1], 'reference product': tuple_input[3], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
list_exc.append({'name': tuple_input[0], 'database': 'biosphere3', 'amount': (amount * (- 1)), 'unit': tuple_input[2], 'type': 'biosphere', 'categories': tuple_input[1], 'tag': tag})
list_exc[(- 1)].update(uncertainty)
else:
tag = [self.tags[t] for t in list(self.tags.keys()) if (t in tuple_output[0])]
if (len(tag) > 0):
tag = tag[0]
else:
tag = 'other'
list_act.append({'production amount': 1, 'database': self.db_name, 'name': tuple_output[0], 'unit': tuple_output[2], 'location': tuple_output[1], 'exchanges': list_exc, 'reference product': tuple_output[3], 'type': 'process', 'code': str(uuid.uuid1()), 'tag': tag})
if presamples:
return (list_act, presamples_matrix)
else:
return list_act<|docstring|>Return the inventory as a dictionary
If there are several values for one exchange, uncertainty information is generated.
If `presamples` is True, returns the inventory as well as a `presamples` matrix.
If `presamples` is False, returns the inventory with characterized uncertainty information.
If `ecoinvent_compatibility` is True, the inventory is made compatible with ecoinvent. If False,
the inventory is compatible with the REMIND-ecoinvent hybrid database output of the `rmnd_lca` library.
:returns: a dictionary that contains all the exchanges
:rtype: dict<|endoftext|> |
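A minimal usage sketch for the write_lci record above, assuming `exporter` is the carculator object that exposes the method; both the variable name and the argument values are illustrative placeholders, not taken from the record.
def export_plain_inventory(exporter):
    # Characterized inventory: one amount per exchange, no presamples matrix.
    return exporter.write_lci(
        presamples=False,
        ecoinvent_compatibility=True,   # map exchange names back to ecoinvent
        ecoinvent_version='3.6',        # '3.5', '3.6' or 'uvek' per the docstring
    )

def export_with_presamples(exporter):
    # With presamples=True the method returns (inventory, presamples_matrix).
    inventory, presamples_matrix = exporter.write_lci(True, True, '3.6')
    return inventory, presamples_matrix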
07ea8306bec5e59a2b1e76a5756e061d8311c7e298c3281d1b5098a2779bc8c1 | def write_lci_to_excel(self, directory, ecoinvent_compatibility, ecoinvent_version, software_compatibility, filename=None):
'\n Export an Excel file that can be consumed by the software defined in `software_compatibility`.\n\n :param directory: str. path to export the file to.\n :param ecoinvent_compatibility: bool. If True, the inventory is compatible with ecoinvent. If False, the inventory is compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.5", "3.6" or "uvek"\n :param software_compatibility: str. "brightway2" or "simapro"\n :returns: returns the file path of the exported inventory.\n :rtype: str.\n '
if (software_compatibility == 'brightway2'):
if (filename is None):
safe_name = (safe_filename('carculator_inventory_export_{}_brightway2'.format(str(datetime.date.today())), False) + '.xlsx')
else:
safe_name = (safe_filename(filename, False) + '.xlsx')
else:
safe_name = (safe_filename('carculator_inventory_export_{}_simapro'.format(str(datetime.date.today())), False) + '.csv')
if (directory is None):
filepath_export = safe_name
else:
if (not os.path.exists(directory)):
os.makedirs(directory)
filepath_export = os.path.join(directory, safe_name)
list_act = self.write_lci(False, ecoinvent_compatibility, ecoinvent_version)
if (software_compatibility == 'brightway2'):
data = []
data.extend((['Database', self.db_name], ('format', 'Excel spreadsheet')))
data.append([])
for k in list_act:
if k.get('exchanges'):
data.extend((['Activity', k['name']], ('location', k['location']), ('production amount', float(k['production amount'])), ('reference product', k.get('reference product')), ('type', 'process'), ('unit', k['unit']), ('worksheet name', 'None'), ['Exchanges'], ['name', 'amount', 'database', 'location', 'unit', 'categories', 'type', 'reference product', 'tag']))
for e in k['exchanges']:
data.append([e['name'], float(e['amount']), e['database'], e.get('location', 'None'), e['unit'], '::'.join(e.get('categories', ())), e['type'], e.get('reference product'), e.get('tag', 'other')])
else:
data.extend((['Activity', k['name']], ('type', 'biosphere'), ('unit', k['unit']), ('worksheet name', 'None')))
data.append([])
workbook = xlsxwriter.Workbook(filepath_export)
bold = workbook.add_format({'bold': True})
bold.set_font_size(12)
highlighted = {'Activity', 'Database', 'Exchanges', 'Parameters', 'Database parameters', 'Project parameters'}
frmt = (lambda x: (bold if (row[0] in highlighted) else None))
sheet = workbook.add_worksheet(create_valid_worksheet_name('test'))
for (row_index, row) in enumerate(data):
for (col_index, value) in enumerate(row):
if (value is None):
continue
elif isinstance(value, float):
sheet.write_number(row_index, col_index, value, frmt(value))
else:
sheet.write_string(row_index, col_index, value, frmt(value))
print('Inventories exported to {}.'.format(filepath_export))
workbook.close()
else:
filename = 'simapro-biosphere.json'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of biosphere flow match between ecoinvent and Simapro could not be found.')
with open(filepath) as json_file:
data = json.load(json_file)
dict_bio = {}
for d in data:
dict_bio[d[2]] = d[1]
filename = 'simapro-technosphere-3.5.csv'
filepath = (DATA_DIR / filename)
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_tech = {}
for row in data:
(name, location, simapro_name) = row
dict_tech[(name, location)] = simapro_name
headers = ['{CSV separator: Semicolon}', '{CSV Format version: 7.0.0}', '{Decimal separator: .}', '{Date separator: /}', '{Short date format: dd/MM/yyyy}']
fields = ['Process', 'Category type', 'Time Period', 'Geography', 'Technology', 'Representativeness', 'Multiple output allocation', 'Substitution allocation', 'Cut off rules', 'Capital goods', 'Date', 'Boundary with nature', 'Record', 'Generator', 'Literature references', 'External documents', 'Collection method', 'Data treatment', 'Verification', 'Products', 'Materials/fuels', 'Resources', 'Emissions to air', 'Emissions to water', 'Emissions to soil', 'Final waste flows', 'Non material emission', 'Social issues', 'Economic issues', 'Waste to treatment', 'End']
simapro_units = {'kilogram': 'kg', 'cubic meter': 'm3', 'kilowatt hour': 'kWh', 'kilometer': 'km', 'ton kilometer': 'tkm', 'megajoule': 'mj', 'unit': 'unit', 'square meter': 'm2', 'kilowatt': 'kW', 'hour': 'h', 'square meter-year': 'm2a', 'meter': 'm', 'vehicle-kilometer': 'vkm', 'meter-year': 'ma'}
with open(filepath_export, 'w', newline='') as csvFile:
writer = csv.writer(csvFile, delimiter=';')
for item in headers:
writer.writerow([item])
writer.writerow([])
for a in list_act:
for item in fields:
writer.writerow([item])
if (item == 'Process'):
name = ((((a['name'].capitalize() + ' {') + a.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([name])
if (item == 'Generator'):
writer.writerow([('carculator ' + str(__version__))])
if (item == 'Geography'):
writer.writerow([a['location']])
if (item == 'Time Period'):
writer.writerow(['Between 2010 and 2020. Extrapolated to the selected years.'])
if (item == 'Date'):
writer.writerow([str(datetime.date.today())])
if (item == 'Cut off rules'):
writer.writerow(['100:0 - polluter pays-principle.'])
if (item == 'Multiple output allocation'):
writer.writerow(['No'])
if (item == 'Substitution allocation'):
writer.writerow(['No'])
if (item == 'Capital goods'):
writer.writerow(['Included when relevant (e.g., factory and machinery.)'])
if (item == 'Literature references'):
writer.writerow(['Sacchi, R. et al., 2020, Renewable and Sustainable Energy Reviews (in review), https://www.psi.ch/en/ta/preprint'])
if (item == 'External documents'):
writer.writerow(['https://carculator.psi.ch'])
if (item == 'Collection method'):
writer.writerow(['Modeling and assumptions: https://carculator.readthedocs.io/en/latest/modeling.html'])
if (item == 'Verification'):
writer.writerow(['In review. Susceptible to change.'])
if (item == 'Products'):
for e in a['exchanges']:
if (e['type'] == 'production'):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((a['name'], a['location']), name), simapro_units[a['unit']], 1.0, '100%', 'not defined', a['database']])
if (item == 'Materials/fuels'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' not in e['name'])):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((e['name'], e['location']), name), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Resources'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'natural resource')):
writer.writerow([dict_bio.get(e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to air'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'air')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to water'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'water')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to soil'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'soil')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Final waste flows'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' in e['name'])):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
writer.writerow([])
csvFile.close()
return filepath_export | Export an Excel file that can be consumed by the software defined in `software_compatibility`.
:param directory: str. path to export the file to.
:param ecoinvent_compatibility: bool. If True, the inventory is compatible with ecoinvent. If False, the inventory is compatible with REMIND-ecoinvent.
:param ecoinvent_version: str. "3.5", "3.6" or "uvek"
:param software_compatibility: str. "brightway2" or "simapro"
:returns: returns the file path of the exported inventory.
:rtype: str. | carculator/export.py | write_lci_to_excel | SimonVoelker/carculator | 0 | python | def write_lci_to_excel(self, directory, ecoinvent_compatibility, ecoinvent_version, software_compatibility, filename=None):
'\n Export an Excel file that can be consumed by the software defined in `software_compatibility`.\n\n :param directory: str. path to export the file to.\n :param ecoinvent_compatibility: bool. If True, the inventory is compatible with ecoinvent. If False, the inventory is compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.5", "3.6" or "uvek"\n :param software_compatibility: str. "brightway2" or "simapro"\n :returns: returns the file path of the exported inventory.\n :rtype: str.\n '
if (software_compatibility == 'brightway2'):
if (filename is None):
safe_name = (safe_filename('carculator_inventory_export_{}_brightway2'.format(str(datetime.date.today())), False) + '.xlsx')
else:
safe_name = (safe_filename(filename, False) + '.xlsx')
else:
safe_name = (safe_filename('carculator_inventory_export_{}_simapro'.format(str(datetime.date.today())), False) + '.csv')
if (directory is None):
filepath_export = safe_name
else:
if (not os.path.exists(directory)):
os.makedirs(directory)
filepath_export = os.path.join(directory, safe_name)
list_act = self.write_lci(False, ecoinvent_compatibility, ecoinvent_version)
if (software_compatibility == 'brightway2'):
data = []
data.extend((['Database', self.db_name], ('format', 'Excel spreadsheet')))
data.append([])
for k in list_act:
if k.get('exchanges'):
data.extend((['Activity', k['name']], ('location', k['location']), ('production amount', float(k['production amount'])), ('reference product', k.get('reference product')), ('type', 'process'), ('unit', k['unit']), ('worksheet name', 'None'), ['Exchanges'], ['name', 'amount', 'database', 'location', 'unit', 'categories', 'type', 'reference product', 'tag']))
for e in k['exchanges']:
data.append([e['name'], float(e['amount']), e['database'], e.get('location', 'None'), e['unit'], '::'.join(e.get('categories', ())), e['type'], e.get('reference product'), e.get('tag', 'other')])
else:
data.extend((['Activity', k['name']], ('type', 'biosphere'), ('unit', k['unit']), ('worksheet name', 'None')))
data.append([])
workbook = xlsxwriter.Workbook(filepath_export)
bold = workbook.add_format({'bold': True})
bold.set_font_size(12)
highlighted = {'Activity', 'Database', 'Exchanges', 'Parameters', 'Database parameters', 'Project parameters'}
frmt = (lambda x: (bold if (row[0] in highlighted) else None))
sheet = workbook.add_worksheet(create_valid_worksheet_name('test'))
for (row_index, row) in enumerate(data):
for (col_index, value) in enumerate(row):
if (value is None):
continue
elif isinstance(value, float):
sheet.write_number(row_index, col_index, value, frmt(value))
else:
sheet.write_string(row_index, col_index, value, frmt(value))
print('Inventories exported to {}.'.format(filepath_export))
workbook.close()
else:
filename = 'simapro-biosphere.json'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of biosphere flow match between ecoinvent and Simapro could not be found.')
with open(filepath) as json_file:
data = json.load(json_file)
dict_bio = {}
for d in data:
dict_bio[d[2]] = d[1]
filename = 'simapro-technosphere-3.5.csv'
filepath = (DATA_DIR / filename)
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_tech = {}
for row in data:
(name, location, simapro_name) = row
dict_tech[(name, location)] = simapro_name
headers = ['{CSV separator: Semicolon}', '{CSV Format version: 7.0.0}', '{Decimal separator: .}', '{Date separator: /}', '{Short date format: dd/MM/yyyy}']
fields = ['Process', 'Category type', 'Time Period', 'Geography', 'Technology', 'Representativeness', 'Multiple output allocation', 'Substitution allocation', 'Cut off rules', 'Capital goods', 'Date', 'Boundary with nature', 'Record', 'Generator', 'Literature references', 'External documents', 'Collection method', 'Data treatment', 'Verification', 'Products', 'Materials/fuels', 'Resources', 'Emissions to air', 'Emissions to water', 'Emissions to soil', 'Final waste flows', 'Non material emission', 'Social issues', 'Economic issues', 'Waste to treatment', 'End']
simapro_units = {'kilogram': 'kg', 'cubic meter': 'm3', 'kilowatt hour': 'kWh', 'kilometer': 'km', 'ton kilometer': 'tkm', 'megajoule': 'mj', 'unit': 'unit', 'square meter': 'm2', 'kilowatt': 'kW', 'hour': 'h', 'square meter-year': 'm2a', 'meter': 'm', 'vehicle-kilometer': 'vkm', 'meter-year': 'ma'}
with open(filepath_export, 'w', newline='') as csvFile:
writer = csv.writer(csvFile, delimiter=';')
for item in headers:
writer.writerow([item])
writer.writerow([])
for a in list_act:
for item in fields:
writer.writerow([item])
if (item == 'Process'):
name = ((((a['name'].capitalize() + ' {') + a.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([name])
if (item == 'Generator'):
writer.writerow([('carculator ' + str(__version__))])
if (item == 'Geography'):
writer.writerow([a['location']])
if (item == 'Time Period'):
writer.writerow(['Between 2010 and 2020. Extrapolated to the selected years.'])
if (item == 'Date'):
writer.writerow([str(datetime.date.today())])
if (item == 'Cut off rules'):
writer.writerow(['100:0 - polluter pays-principle.'])
if (item == 'Multiple output allocation'):
writer.writerow(['No'])
if (item == 'Substitution allocation'):
writer.writerow(['No'])
if (item == 'Capital goods'):
writer.writerow(['Included when relevant (e.g., factory and machinery.)'])
if (item == 'Literature references'):
writer.writerow(['Sacchi, R. et al., 2020, Renewable and Sustainable Energy Reviews (in review), https://www.psi.ch/en/ta/preprint'])
if (item == 'External documents'):
writer.writerow(['https://carculator.psi.ch'])
if (item == 'Collection method'):
writer.writerow(['Modeling and assumptions: https://carculator.readthedocs.io/en/latest/modeling.html'])
if (item == 'Verification'):
writer.writerow(['In review. Susceptible to change.'])
if (item == 'Products'):
for e in a['exchanges']:
if (e['type'] == 'production'):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((a['name'], a['location']), name), simapro_units[a['unit']], 1.0, '100%', 'not defined', a['database']])
if (item == 'Materials/fuels'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' not in e['name'])):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((e['name'], e['location']), name), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Resources'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'natural resource')):
writer.writerow([dict_bio.get(e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to air'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'air')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to water'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'water')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to soil'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'soil')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Final waste flows'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' in e['name'])):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
writer.writerow([])
csvFile.close()
return filepath_export | def write_lci_to_excel(self, directory, ecoinvent_compatibility, ecoinvent_version, software_compatibility, filename=None):
'\n Export an Excel file that can be consumed by the software defined in `software_compatibility`.\n\n :param directory: str. path to export the file to.\n :param ecoinvent_compatibility: bool. If True, the inventory is compatible with ecoinvent. If False, the inventory is compatible with REMIND-ecoinvent.\n :param ecoinvent_version: str. "3.5", "3.6" or "uvek"\n :param software_compatibility: str. "brightway2" or "simapro"\n :returns: returns the file path of the exported inventory.\n :rtype: str.\n '
if (software_compatibility == 'brightway2'):
if (filename is None):
safe_name = (safe_filename('carculator_inventory_export_{}_brightway2'.format(str(datetime.date.today())), False) + '.xlsx')
else:
safe_name = (safe_filename(filename, False) + '.xlsx')
else:
safe_name = (safe_filename('carculator_inventory_export_{}_simapro'.format(str(datetime.date.today())), False) + '.csv')
if (directory is None):
filepath_export = safe_name
else:
if (not os.path.exists(directory)):
os.makedirs(directory)
filepath_export = os.path.join(directory, safe_name)
list_act = self.write_lci(False, ecoinvent_compatibility, ecoinvent_version)
if (software_compatibility == 'brightway2'):
data = []
data.extend((['Database', self.db_name], ('format', 'Excel spreadsheet')))
data.append([])
for k in list_act:
if k.get('exchanges'):
data.extend((['Activity', k['name']], ('location', k['location']), ('production amount', float(k['production amount'])), ('reference product', k.get('reference product')), ('type', 'process'), ('unit', k['unit']), ('worksheet name', 'None'), ['Exchanges'], ['name', 'amount', 'database', 'location', 'unit', 'categories', 'type', 'reference product', 'tag']))
for e in k['exchanges']:
data.append([e['name'], float(e['amount']), e['database'], e.get('location', 'None'), e['unit'], '::'.join(e.get('categories', ())), e['type'], e.get('reference product'), e.get('tag', 'other')])
else:
data.extend((['Activity', k['name']], ('type', 'biosphere'), ('unit', k['unit']), ('worksheet name', 'None')))
data.append([])
workbook = xlsxwriter.Workbook(filepath_export)
bold = workbook.add_format({'bold': True})
bold.set_font_size(12)
highlighted = {'Activity', 'Database', 'Exchanges', 'Parameters', 'Database parameters', 'Project parameters'}
frmt = (lambda x: (bold if (row[0] in highlighted) else None))
sheet = workbook.add_worksheet(create_valid_worksheet_name('test'))
for (row_index, row) in enumerate(data):
for (col_index, value) in enumerate(row):
if (value is None):
continue
elif isinstance(value, float):
sheet.write_number(row_index, col_index, value, frmt(value))
else:
sheet.write_string(row_index, col_index, value, frmt(value))
print('Inventories exported to {}.'.format(filepath_export))
workbook.close()
else:
filename = 'simapro-biosphere.json'
filepath = (DATA_DIR / filename)
if (not filepath.is_file()):
raise FileNotFoundError('The dictionary of biosphere flow match between ecoinvent and Simapro could not be found.')
with open(filepath) as json_file:
data = json.load(json_file)
dict_bio = {}
for d in data:
dict_bio[d[2]] = d[1]
filename = 'simapro-technosphere-3.5.csv'
filepath = (DATA_DIR / filename)
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(';')] for r in f.readlines()]
((_, _, *header), *data) = csv_list
dict_tech = {}
for row in data:
(name, location, simapro_name) = row
dict_tech[(name, location)] = simapro_name
headers = ['{CSV separator: Semicolon}', '{CSV Format version: 7.0.0}', '{Decimal separator: .}', '{Date separator: /}', '{Short date format: dd/MM/yyyy}']
fields = ['Process', 'Category type', 'Time Period', 'Geography', 'Technology', 'Representativeness', 'Multiple output allocation', 'Substitution allocation', 'Cut off rules', 'Capital goods', 'Date', 'Boundary with nature', 'Record', 'Generator', 'Literature references', 'External documents', 'Collection method', 'Data treatment', 'Verification', 'Products', 'Materials/fuels', 'Resources', 'Emissions to air', 'Emissions to water', 'Emissions to soil', 'Final waste flows', 'Non material emission', 'Social issues', 'Economic issues', 'Waste to treatment', 'End']
simapro_units = {'kilogram': 'kg', 'cubic meter': 'm3', 'kilowatt hour': 'kWh', 'kilometer': 'km', 'ton kilometer': 'tkm', 'megajoule': 'mj', 'unit': 'unit', 'square meter': 'm2', 'kilowatt': 'kW', 'hour': 'h', 'square meter-year': 'm2a', 'meter': 'm', 'vehicle-kilometer': 'vkm', 'meter-year': 'ma'}
with open(filepath_export, 'w', newline='') as csvFile:
writer = csv.writer(csvFile, delimiter=';')
for item in headers:
writer.writerow([item])
writer.writerow([])
for a in list_act:
for item in fields:
writer.writerow([item])
if (item == 'Process'):
name = ((((a['name'].capitalize() + ' {') + a.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([name])
if (item == 'Generator'):
writer.writerow([('carculator ' + str(__version__))])
if (item == 'Geography'):
writer.writerow([a['location']])
if (item == 'Time Period'):
writer.writerow(['Between 2010 and 2020. Extrapolated to the selected years.'])
if (item == 'Date'):
writer.writerow([str(datetime.date.today())])
if (item == 'Cut off rules'):
writer.writerow(['100:0 - polluter pays-principle.'])
if (item == 'Multiple output allocation'):
writer.writerow(['No'])
if (item == 'Substitution allocation'):
writer.writerow(['No'])
if (item == 'Capital goods'):
writer.writerow(['Included when relevant (e.g., factory and machinery.)'])
if (item == 'Literature references'):
writer.writerow(['Sacchi, R. et al., 2020, Renewable and Sustainable Energy Reviews (in review), https://www.psi.ch/en/ta/preprint'])
if (item == 'External documents'):
writer.writerow(['https://carculator.psi.ch'])
if (item == 'Collection method'):
writer.writerow(['Modeling and assumptions: https://carculator.readthedocs.io/en/latest/modeling.html'])
if (item == 'Verification'):
writer.writerow(['In review. Susceptible to change.'])
if (item == 'Products'):
for e in a['exchanges']:
if (e['type'] == 'production'):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((a['name'], a['location']), name), simapro_units[a['unit']], 1.0, '100%', 'not defined', a['database']])
if (item == 'Materials/fuels'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' not in e['name'])):
name = ((((e['reference product'].capitalize() + ' {') + e.get('location', 'GLO')) + '}') + '| Cut-off, U')
writer.writerow([dict_tech.get((e['name'], e['location']), name), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Resources'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'natural resource')):
writer.writerow([dict_bio.get(e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to air'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'air')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to water'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'water')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Emissions to soil'):
for e in a['exchanges']:
if ((e['type'] == 'biosphere') and (e['categories'][0] == 'soil')):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
if (item == 'Final waste flows'):
for e in a['exchanges']:
if ((e['type'] == 'technosphere') and ('waste' in e['name'])):
writer.writerow([dict_bio.get(e['name'], e['name']), simapro_units[e['unit']], e['amount'], 'undefined', 0, 0, 0])
writer.writerow([])
csvFile.close()
return filepath_export<|docstring|>Export an Excel file that can be consumed by the software defined in `software_compatibility`.
:param directory: str. path to export the file to.
:param ecoinvent_compatibility: bool. If True, the inventory is compatible with ecoinvent. If False, the inventory is compatible with REMIND-ecoinvent.
:param ecoinvent_version: str. "3.5", "3.6" or "uvek"
:param software_compatibility: str. "brightway2" or "simapro"
:returns: returns the file path of the exported inventory.
:rtype: str.<|endoftext|> |
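A short sketch of driving write_lci_to_excel above for both supported targets; `exporter` and the output directory are placeholders and the argument values are examples only, not defaults.
def export_to_files(exporter, out_dir='inventories'):
    # Brightway2 flavour: writes an .xlsx workbook and returns its path.
    xlsx_path = exporter.write_lci_to_excel(
        directory=out_dir,
        ecoinvent_compatibility=True,
        ecoinvent_version='3.6',
        software_compatibility='brightway2',
    )
    # SimaPro flavour: the same inventory serialized as a semicolon-separated CSV.
    csv_path = exporter.write_lci_to_excel(out_dir, True, '3.6', 'simapro')
    return xlsx_path, csv_path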
17d085257d02727e8367cfa0a9c2d86cc0606545874576d2dedea71fbf314c30 | def write_lci_to_bw(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return a LCIImporter object with the inventory as `data` attribute.\n\n :return: LCIImporter object to be imported in a Brightway2 project\n :rtype: bw2io.base_lci.LCIImporter\n '
if (presamples == True):
(data, array) = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return (i, array)
else:
data = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return i | Return a LCIImporter object with the inventory as `data` attribute.
:return: LCIImporter object to be imported in a Brightway2 project
:rtype: bw2io.base_lci.LCIImporter | carculator/export.py | write_lci_to_bw | SimonVoelker/carculator | 0 | python | def write_lci_to_bw(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return a LCIImporter object with the inventory as `data` attribute.\n\n :return: LCIImporter object to be imported in a Brightway2 project\n :rtype: bw2io.base_lci.LCIImporter\n '
if (presamples == True):
(data, array) = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return (i, array)
else:
data = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return i | def write_lci_to_bw(self, presamples, ecoinvent_compatibility, ecoinvent_version):
'\n Return a LCIImporter object with the inventory as `data` attribute.\n\n :return: LCIImporter object to be imported in a Brightway2 project\n :rtype: bw2io.base_lci.LCIImporter\n '
if (presamples == True):
(data, array) = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return (i, array)
else:
data = self.write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
i = bw2io.importers.base_lci.LCIImporter(self.db_name)
i.data = data
return i<|docstring|>Return a LCIImporter object with the inventory as `data` attribute.
:return: LCIImporter object to be imported in a Brightway2 project
:rtype: bw2io.base_lci.LCIImporter<|endoftext|> |
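A hedged sketch of pulling write_lci_to_bw above into a Brightway2 project; `exporter` is a placeholder and the follow-up importer calls are the usual bw2io workflow, which may need adapting to the project at hand.
def import_into_brightway(exporter):
    # Without presamples the method returns a single LCIImporter.
    importer = exporter.write_lci_to_bw(
        presamples=False,
        ecoinvent_compatibility=True,
        ecoinvent_version='3.6',
    )
    importer.apply_strategies()   # typical bw2io normalization/matching step
    importer.write_database()     # persist the inventory in the active project
    return importer

def import_with_presamples(exporter):
    # With presamples=True a (LCIImporter, presamples_matrix) pair is returned.
    importer, presamples_matrix = exporter.write_lci_to_bw(True, True, '3.6')
    return importer, presamples_matrix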
68e1629ee8b5ede40cc314ad7f9213694decfc709e67be7a60b76c87b9bd976d | def make_pdf(self, dist, params, size=10000):
"Generate distributions's Probability Distribution Function "
import pandas as pd
arg = params[:(- 2)]
loc = params[(- 2)]
scale = params[(- 1)]
start = (dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale))
end = (dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale))
x = np.linspace(start, end, size)
y = dist.pdf(x, *arg, loc=loc, scale=scale)
pdf = pd.Series(y, x)
return pdf | Generate the distribution's Probability Distribution Function | carculator/export.py | make_pdf | SimonVoelker/carculator | 0 | python | def make_pdf(self, dist, params, size=10000):
" "
import pandas as pd
arg = params[:(- 2)]
loc = params[(- 2)]
scale = params[(- 1)]
start = (dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale))
end = (dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale))
x = np.linspace(start, end, size)
y = dist.pdf(x, *arg, loc=loc, scale=scale)
pdf = pd.Series(y, x)
return pdf | def make_pdf(self, dist, params, size=10000):
" "
import pandas as pd
arg = params[:(- 2)]
loc = params[(- 2)]
scale = params[(- 1)]
start = (dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale))
end = (dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale))
x = np.linspace(start, end, size)
y = dist.pdf(x, *arg, loc=loc, scale=scale)
pdf = pd.Series(y, x)
return pdf<|docstring|>Generate the distribution's Probability Distribution Function<|endoftext|>
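The make_pdf record above pairs naturally with a scipy.stats fit; the sketch below assumes `obj` is the instance that owns make_pdf and uses a normal distribution purely as an example.
import numpy as np
import scipy.stats as st

def fitted_pdf(obj, samples):
    dist = st.norm
    params = dist.fit(samples)    # for norm: (loc, scale), so the shape args are empty
    # make_pdf returns a pandas Series indexed by x over the 1st-99th percentiles.
    return obj.make_pdf(dist, params, size=2000)

# e.g. fitted_pdf(obj, np.random.default_rng(0).normal(3.0, 0.5, 1000))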
78172f453840be45a9fab6858fa551770422b85ef3c1638330be5cc843fb1866 | @commands.command(name='SingleShot', aliases=['Shoot'])
async def single_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a single shot attack.\n\n Example:\n !SingleShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollSingleShotAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | Performs a Combat roll for a single shot attack.
Example:
!SingleShot reflex, skill, number of dice, dice sides, damage modifier, distance | bots/cogs/cp2020.py | single_shot | BryanOrabutt/discbot | 0 | python | @commands.command(name='SingleShot', aliases=['Shoot'])
async def single_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a single shot attack.\n\n Example:\n !SingleShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollSingleShotAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | @commands.command(name='SingleShot', aliases=['Shoot'])
async def single_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a single shot attack.\n\n Example:\n !SingleShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollSingleShotAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.'))<|docstring|>Performs a Combat roll for a single shot attack.
Example:
!SingleShot reflex, skill, number of dice, dice sides, damage modifier, distance<|endoftext|> |
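For the single_shot command above, a small sketch of how the comma-separated message maps onto the six integers handed to RollSingleShotAttack; the sample values are invented.
# A user might type: !SingleShot 10, 7, 2, 6, 1, 25
msg = '10, 7, 2, 6, 1, 25'
params = [int(p) for p in msg.split(',')]          # int() tolerates the spaces
reflex, skill, n_dice, dice_sides, damage_modifier, distance = params
# -> self.RollSingleShotAttack(10, 7, 2, 6, 1, 25); any other count of values
#    triggers the "You are missing a piece of information." reply instead.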
9048ff8e2f44a161de9cdd133340778ac0c6857fdba0aaad6fb224346fccb3af | @commands.command(name='BurstShot', aliases=['Burst'])
async def burst_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a burst attack.\n\n Example:\n !BurstShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollBurstAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | Performs a Combat roll for a burst attack.
Example:
!BurstShot reflex, skill, number of dice, dice sides, damage modifier, distance | bots/cogs/cp2020.py | burst_shot | BryanOrabutt/discbot | 0 | python | @commands.command(name='BurstShot', aliases=['Burst'])
async def burst_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a burst attack.\n\n Example:\n !BurstShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollBurstAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | @commands.command(name='BurstShot', aliases=['Burst'])
async def burst_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a burst attack.\n\n Example:\n !BurstShot reflex, skill, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 6):
result = self.RollBurstAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.'))<|docstring|>Performs a Combat roll for a burst attack.
Example:
!BurstShot reflex, skill, number of dice, dice sides, damage modifier, distance<|endoftext|> |
1fd651124a1510c8e8996671abd85b5dfdce8217b8cb7283b87467c1b3286614 | @commands.command(name='FullAutoShot', aliases=['FAS'])
async def full_auto_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a full auto attack.\n\n Example:\n !FullAutoShot reflex, skill, shotsFired, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 7):
result = self.RollFullAutoAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]), int(params[6]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | Performs a Combat roll for a full auto attack.
Example:
!FullAutoShot reflex, skill, shotsFired, number of dice, dice sides, damage modifier, distance | bots/cogs/cp2020.py | full_auto_shot | BryanOrabutt/discbot | 0 | python | @commands.command(name='FullAutoShot', aliases=['FAS'])
async def full_auto_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a full auto attack.\n\n Example:\n !FullAutoShot reflex, skill, shotsFired, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 7):
result = self.RollFullAutoAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]), int(params[6]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.')) | @commands.command(name='FullAutoShot', aliases=['FAS'])
async def full_auto_shot(self, ctx, *, msg: str):
'\n Performs a Combat roll for a full auto attack.\n\n Example:\n !FullAutoShot reflex, skill, shotsFired, number of dice, dice sides, damage modifier, distance\n '
params = msg.split(',')
if (len(params) == 7):
result = self.RollFullAutoAttack(int(params[0]), int(params[1]), int(params[2]), int(params[3]), int(params[4]), int(params[5]), int(params[6]))
(await ctx.send(result.Summary()))
else:
(await ctx.send('You are missing a piece of information.'))<|docstring|>Performs a Combat roll for a full auto attack.
Example:
!FullAutoShot reflex, skill, shotsFired, number of dice, dice sides, damage modifier, distance<|endoftext|> |
2861f7b619250c817b99edf11f9f03b2f6e0c9bf2dba2f30f0aaff350dec50de | def mms_feeps_pad_spinavg(probe='1', data_units='intensity', datatype='electron', data_rate='srvy', level='l2', suffix='', energy=[70, 600], bin_size=16.3636):
"\n This function will spin-average the FEEPS pitch angle distributions\n \n Parameters:\n probe: str\n probe #, e.g., '4' for MMS4\n\n data_units: str\n 'intensity' or 'count_rate'\n\n datatype: str\n 'electron' or 'ion'\n\n data_rate: str\n instrument data rate, e.g., 'srvy' or 'brst'\n\n level: str\n data level, e.g., 'l2'\n\n suffix: str\n suffix of the loaded data\n\n energy: list of float\n energy range to include in the calculation\n \n bin_size: float\n size of the pitch angle bins\n\n Returns:\n Name of tplot variable created.\n "
units_label = ''
if (data_units == 'intensity'):
units_label = '1/(cm^2-sr-s-keV)'
elif (data_units == 'counts'):
units_label = '[counts/s]'
if (datatype == 'electron'):
lower_en = 71
else:
lower_en = 78
prefix = (('mms' + str(probe)) + '_epd_feeps_')
(sector_times, spin_sectors) = get_data((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_spinsectnum') + suffix))
spin_starts = [(spin_end + 1) for spin_end in np.where((spin_sectors[:(- 1)] >= spin_sectors[1:]))[0]]
en_range_string = (((str(int(energy[0])) + '-') + str(int(energy[1]))) + 'keV')
var_name = (((((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_') + data_units) + '_') + en_range_string) + '_pad') + suffix)
(times, data, angles) = get_data(var_name)
spin_avg_flux = np.zeros([len(spin_starts), len(angles)])
current_start = spin_starts[0]
for spin_idx in range(1, (len(spin_starts) - 1)):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
spin_avg_flux[((spin_idx - 1), :)] = np.nanmean(data[(current_start:(spin_starts[spin_idx] + 1), :)], axis=0)
current_start = (spin_starts[spin_idx] + 1)
store_data(((var_name + '_spin') + suffix), data={'x': times[spin_starts], 'y': spin_avg_flux, 'v': angles})
options(((var_name + '_spin') + suffix), 'spec', True)
options(((var_name + '_spin') + suffix), 'ylog', False)
options(((var_name + '_spin') + suffix), 'zlog', True)
options(((var_name + '_spin') + suffix), 'Colormap', 'jet')
options(((var_name + '_spin') + suffix), 'ztitle', units_label)
options(((var_name + '_spin') + suffix), 'ytitle', (((('MMS' + str(probe)) + ' ') + datatype) + ' PA (deg)'))
return ((var_name + '_spin') + suffix) | This function will spin-average the FEEPS pitch angle distributions
Parameters:
probe: str
probe #, e.g., '4' for MMS4
data_units: str
'intensity' or 'count_rate'
datatype: str
'electron' or 'ion'
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst'
level: str
data level, e.g., 'l2'
suffix: str
suffix of the loaded data
energy: list of float
energy range to include in the calculation
bin_size: float
size of the pitch angle bins
Returns:
Name of tplot variable created. | pyspedas/mms/feeps/mms_feeps_pad_spinavg.py | mms_feeps_pad_spinavg | xnchu/pyspedas | 1 | python | def mms_feeps_pad_spinavg(probe='1', data_units='intensity', datatype='electron', data_rate='srvy', level='l2', suffix='', energy=[70, 600], bin_size=16.3636):
"\n This function will spin-average the FEEPS pitch angle distributions\n \n Parameters:\n probe: str\n probe #, e.g., '4' for MMS4\n\n data_units: str\n 'intensity' or 'count_rate'\n\n datatype: str\n 'electron' or 'ion'\n\n data_rate: str\n instrument data rate, e.g., 'srvy' or 'brst'\n\n level: str\n data level, e.g., 'l2'\n\n suffix: str\n suffix of the loaded data\n\n energy: list of float\n energy range to include in the calculation\n \n bin_size: float\n size of the pitch angle bins\n\n Returns:\n Name of tplot variable created.\n "
units_label = ''
if (data_units == 'intensity'):
units_label = '1/(cm^2-sr-s-keV)'
elif (data_units == 'counts'):
units_label = '[counts/s]'
if (datatype == 'electron'):
lower_en = 71
else:
lower_en = 78
prefix = (('mms' + str(probe)) + '_epd_feeps_')
(sector_times, spin_sectors) = get_data((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_spinsectnum') + suffix))
spin_starts = [(spin_end + 1) for spin_end in np.where((spin_sectors[:(- 1)] >= spin_sectors[1:]))[0]]
en_range_string = (((str(int(energy[0])) + '-') + str(int(energy[1]))) + 'keV')
var_name = (((((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_') + data_units) + '_') + en_range_string) + '_pad') + suffix)
(times, data, angles) = get_data(var_name)
spin_avg_flux = np.zeros([len(spin_starts), len(angles)])
current_start = spin_starts[0]
for spin_idx in range(1, (len(spin_starts) - 1)):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
spin_avg_flux[((spin_idx - 1), :)] = np.nanmean(data[(current_start:(spin_starts[spin_idx] + 1), :)], axis=0)
current_start = (spin_starts[spin_idx] + 1)
store_data(((var_name + '_spin') + suffix), data={'x': times[spin_starts], 'y': spin_avg_flux, 'v': angles})
options(((var_name + '_spin') + suffix), 'spec', True)
options(((var_name + '_spin') + suffix), 'ylog', False)
options(((var_name + '_spin') + suffix), 'zlog', True)
options(((var_name + '_spin') + suffix), 'Colormap', 'jet')
options(((var_name + '_spin') + suffix), 'ztitle', units_label)
options(((var_name + '_spin') + suffix), 'ytitle', (((('MMS' + str(probe)) + ' ') + datatype) + ' PA (deg)'))
return ((var_name + '_spin') + suffix) | def mms_feeps_pad_spinavg(probe='1', data_units='intensity', datatype='electron', data_rate='srvy', level='l2', suffix='', energy=[70, 600], bin_size=16.3636):
"\n This function will spin-average the FEEPS pitch angle distributions\n \n Parameters:\n probe: str\n probe #, e.g., '4' for MMS4\n\n data_units: str\n 'intensity' or 'count_rate'\n\n datatype: str\n 'electron' or 'ion'\n\n data_rate: str\n instrument data rate, e.g., 'srvy' or 'brst'\n\n level: str\n data level, e.g., 'l2'\n\n suffix: str\n suffix of the loaded data\n\n energy: list of float\n energy range to include in the calculation\n \n bin_size: float\n size of the pitch angle bins\n\n Returns:\n Name of tplot variable created.\n "
units_label = ''
if (data_units == 'intensity'):
units_label = '1/(cm^2-sr-s-keV)'
elif (data_units == 'counts'):
units_label = '[counts/s]'
if (datatype == 'electron'):
lower_en = 71
else:
lower_en = 78
prefix = (('mms' + str(probe)) + '_epd_feeps_')
(sector_times, spin_sectors) = get_data((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_spinsectnum') + suffix))
spin_starts = [(spin_end + 1) for spin_end in np.where((spin_sectors[:(- 1)] >= spin_sectors[1:]))[0]]
en_range_string = (((str(int(energy[0])) + '-') + str(int(energy[1]))) + 'keV')
var_name = (((((((((((prefix + data_rate) + '_') + level) + '_') + datatype) + '_') + data_units) + '_') + en_range_string) + '_pad') + suffix)
(times, data, angles) = get_data(var_name)
spin_avg_flux = np.zeros([len(spin_starts), len(angles)])
current_start = spin_starts[0]
for spin_idx in range(1, (len(spin_starts) - 1)):
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
spin_avg_flux[((spin_idx - 1), :)] = np.nanmean(data[(current_start:(spin_starts[spin_idx] + 1), :)], axis=0)
current_start = (spin_starts[spin_idx] + 1)
store_data(((var_name + '_spin') + suffix), data={'x': times[spin_starts], 'y': spin_avg_flux, 'v': angles})
options(((var_name + '_spin') + suffix), 'spec', True)
options(((var_name + '_spin') + suffix), 'ylog', False)
options(((var_name + '_spin') + suffix), 'zlog', True)
options(((var_name + '_spin') + suffix), 'Colormap', 'jet')
options(((var_name + '_spin') + suffix), 'ztitle', units_label)
options(((var_name + '_spin') + suffix), 'ytitle', (((('MMS' + str(probe)) + ' ') + datatype) + ' PA (deg)'))
return ((var_name + '_spin') + suffix)<|docstring|>This function will spin-average the FEEPS pitch angle distributions
Parameters:
probe: str
probe #, e.g., '4' for MMS4
data_units: str
'intensity' or 'count_rate'
datatype: str
'electron' or 'ion'
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst'
level: str
data level, e.g., 'l2'
suffix: str
suffix of the loaded data
energy: list of float
energy range to include in the calculation
bin_size: float
size of the pitch angle bins
Returns:
Name of tplot variable created.<|endoftext|> |
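A usage sketch for the spin-averaging routine above; this is an assumption-laden example: FEEPS data must already be loaded and the pitch-angle-distribution and spin-sector tplot variables must already exist for the chosen probe, data rate and energy range. The import path simply follows the record's file path above.

from pytplot import tplot
from pyspedas.mms.feeps.mms_feeps_pad_spinavg import mms_feeps_pad_spinavg

spin_var = mms_feeps_pad_spinavg(probe='1', data_units='intensity',
                                 datatype='electron', data_rate='srvy',
                                 level='l2', energy=[70, 600])
tplot(spin_var)  # plots the spin-averaged pitch-angle spectrogram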
3554f5d87f684fee34c882709d4e8efb0ce3e299f54fa13d3eabe3ea691f9a7a | def handle(self):
'Handles a request ignoring dropped connections.'
try:
self.stager = self.server.stager
self.shell = self.stager.shell
self.options = copy.deepcopy(self.server.server.options)
self.loader = core.loader
self.shell.print_verbose(('handler::handle() - Incoming HTTP from %s' % str(self.client_address)))
return BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
pass | Handles a request ignoring dropped connections. | core/handler.py | handle | fymore/- | 9 | python | def handle(self):
try:
self.stager = self.server.stager
self.shell = self.stager.shell
self.options = copy.deepcopy(self.server.server.options)
self.loader = core.loader
self.shell.print_verbose(('handler::handle() - Incoming HTTP from %s' % str(self.client_address)))
return BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
pass | def handle(self):
try:
self.stager = self.server.stager
self.shell = self.stager.shell
self.options = copy.deepcopy(self.server.server.options)
self.loader = core.loader
self.shell.print_verbose(('handler::handle() - Incoming HTTP from %s' % str(self.client_address)))
return BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
pass<|docstring|>Handles a request ignoring dropped connections.<|endoftext|> |
551d1b43565a6683232d5db514e576a03c481e42886492269aba6f6e4e68f1b6 | @app.get('/graph', status_code=200, tags=['READ', 'Graph'])
def connect_Graph():
' connects to the dgraph server'
global graph_conn
try:
graph_conn = dGraph_conn()
except Exception as e:
logging.error('At connecting to graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Not connected to Graph. ' + str(e)))
return {'msg': 'Connected to graph'} | connects to the dgraph server | dgraph/dGraph_fastAPI_server.py | connect_Graph | kavitharaju/vachan-graph | 3 | python | @app.get('/graph', status_code=200, tags=['READ', 'Graph'])
def connect_Graph():
' '
global graph_conn
try:
graph_conn = dGraph_conn()
except Exception as e:
logging.error('At connecting to graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Not connected to Graph. ' + str(e)))
return {'msg': 'Connected to graph'} | @app.get('/graph', status_code=200, tags=['READ', 'Graph'])
def connect_Graph():
' '
global graph_conn
try:
graph_conn = dGraph_conn()
except Exception as e:
logging.error('At connecting to graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Not connected to Graph. ' + str(e)))
return {'msg': 'Connected to graph'}<|docstring|>connects to the dgraph server<|endoftext|> |
3c532e35f58f0432435ec593ce9d55371d473b5a42f050c0c995dba3f1c65d27 | @app.delete('/graph', status_code=200, tags=['Graph', 'WRITE'])
def delete():
' delete the entire graph'
global graph_conn
try:
res = graph_conn.drop_all()
except Exception as e:
logging.error('At deleting graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
return {'msg': 'Deleted the entire graph'} | delete the entire graph | dgraph/dGraph_fastAPI_server.py | delete | kavitharaju/vachan-graph | 3 | python | @app.delete('/graph', status_code=200, tags=['Graph', 'WRITE'])
def delete():
' '
global graph_conn
try:
res = graph_conn.drop_all()
except Exception as e:
logging.error('At deleting graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
return {'msg': 'Deleted the entire graph'} | @app.delete('/graph', status_code=200, tags=['Graph', 'WRITE'])
def delete():
' '
global graph_conn
try:
res = graph_conn.drop_all()
except Exception as e:
logging.error('At deleting graph DB')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
return {'msg': 'Deleted the entire graph'}<|docstring|>delete the entire graph<|endoftext|> |
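A client sketch for the two /graph endpoints above; the base URL is a placeholder for wherever this FastAPI app is served:

import requests

BASE = 'http://localhost:8000'                    # placeholder host/port
print(requests.get(f'{BASE}/graph').json())       # {'msg': 'Connected to graph'}
print(requests.delete(f'{BASE}/graph').json())    # {'msg': 'Deleted the entire graph'} -- drops everything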
29715075a69eeec3bb5c927d77ba946febabdf0e7f2e10bbe0d7a970765dcc6d | @app.get('/strongs', status_code=200, tags=['READ', 'Strongs Number'])
def get_strongs(strongs_number: Optional[int]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of strongs nodes and their property values.\n\tIf strongs_number is sepcified, its properties and occurances are returned.\n\tIf strongs_number is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all strongs in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the strongs numbers in Greek.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not strongs_number) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_strongs_query, {'$dummy': ''})
elif strongs_number:
query_res = graph_conn.query_data(strongs_link_query, {'$strongs': str(strongs_number)})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(strongs_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['strongs'])
result['strongs'] = query_res['strongs'][(skip + 1):limit]
for (i, strong) in enumerate(result['strongs']):
if ('occurances' in strong):
occurs = []
for occur in strong['occurances']:
logging.info(occur)
logging.info(num_book_map)
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['strongs'][i]['occurances'] = occurs
if ('StrongsNumber' in strong):
strong_link = ('%s/strongs?strongs_number=%s' % (base_URL, strong['StrongsNumber']))
result['strongs'][i]['strongsLink'] = urllib.parse.quote(strong_link, safe='/:?=')
return result | Get the list of strongs nodes and their property values.
If strongs_number is specified, its properties and occurrences are returned.
If strongs_number is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)
is provided, lists all strongs in that verse, with their property values and positions (as per the Greek bible).
If neither of the first two query params are provided, it lists all the strongs numbers in Greek.
Number of items returned can be set using the skip and limit parameters. | dgraph/dGraph_fastAPI_server.py | get_strongs | kavitharaju/vachan-graph | 3 | python | @app.get('/strongs', status_code=200, tags=['READ', 'Strongs Number'])
def get_strongs(strongs_number: Optional[int]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of strongs nodes and their property values.\n\tIf strongs_number is sepcified, its properties and occurances are returned.\n\tIf strongs_number is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all strongs in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the strongs numbers in Greek.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not strongs_number) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_strongs_query, {'$dummy': ''})
elif strongs_number:
query_res = graph_conn.query_data(strongs_link_query, {'$strongs': str(strongs_number)})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(strongs_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['strongs'])
result['strongs'] = query_res['strongs'][(skip + 1):limit]
for (i, strong) in enumerate(result['strongs']):
if ('occurances' in strong):
occurs = []
for occur in strong['occurances']:
logging.info(occur)
logging.info(num_book_map)
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['strongs'][i]['occurances'] = occurs
if ('StrongsNumber' in strong):
strong_link = ('%s/strongs?strongs_number=%s' % (base_URL, strong['StrongsNumber']))
result['strongs'][i]['strongsLink'] = urllib.parse.quote(strong_link, safe='/:?=')
return result | @app.get('/strongs', status_code=200, tags=['READ', 'Strongs Number'])
def get_strongs(strongs_number: Optional[int]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of strongs nodes and their property values.\n\tIf strongs_number is sepcified, its properties and occurances are returned.\n\tIf strongs_number is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all strongs in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the strongs numbers in Greek.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not strongs_number) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_strongs_query, {'$dummy': ''})
elif strongs_number:
query_res = graph_conn.query_data(strongs_link_query, {'$strongs': str(strongs_number)})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(strongs_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['strongs'])
result['strongs'] = query_res['strongs'][(skip + 1):limit]
for (i, strong) in enumerate(result['strongs']):
if ('occurances' in strong):
occurs = []
for occur in strong['occurances']:
logging.info(occur)
logging.info(num_book_map)
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['strongs'][i]['occurances'] = occurs
if ('StrongsNumber' in strong):
strong_link = ('%s/strongs?strongs_number=%s' % (base_URL, strong['StrongsNumber']))
result['strongs'][i]['strongsLink'] = urllib.parse.quote(strong_link, safe='/:?=')
return result<|docstring|>Get the list of strongs nodes and their property values.
If strongs_number is specified, its properties and occurrences are returned.
If strongs_number is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)
is provided, lists all strongs in that verse, with their property values and positions (as per the Greek bible).
If neither of the first two query params are provided, it lists all the strongs numbers in Greek.
Number of items returned can be set using the skip and limit parameters.<|endoftext|> |
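A client sketch of the three query patterns described in the docstring above (base URL and values are placeholders):

import requests

BASE = 'http://localhost:8000'                                                 # placeholder
listing = requests.get(f'{BASE}/strongs', params={'skip': 0, 'limit': 10})     # all strongs, paged
single = requests.get(f'{BASE}/strongs', params={'strongs_number': 25})        # one strongs number
in_verse = requests.get(f'{BASE}/strongs', params={'bbbcccvvv': 'mat001001'})  # all strongs in Matthew 1:1
print(in_verse.json()['strongs'])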
138fe6e6f27165191bd20fd71bf058c31537514ac309b88697959e5e4661fe9f | @app.put('/strongs/{strongs_number}', status_code=200, tags=['Strongs Number', 'WRITE'])
def edit_strongs(strongs_number: int, key_values: List[StrongsPropertyValue]=Body(...)):
' Update a property value of selected strongs number node'
logging.info(('input args strongs_number: %s, key_values: %s' % (strongs_number, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=strong_node_query, nquad=nquad, variables={'$strongs': str(strongs_number)})
except Exception as e:
logging.error('At editing strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | Update a property value of selected strongs number node | dgraph/dGraph_fastAPI_server.py | edit_strongs | kavitharaju/vachan-graph | 3 | python | @app.put('/strongs/{strongs_number}', status_code=200, tags=['Strongs Number', 'WRITE'])
def edit_strongs(strongs_number: int, key_values: List[StrongsPropertyValue]=Body(...)):
' '
logging.info(('input args strongs_number: %s, key_values: %s' % (strongs_number, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=strong_node_query, nquad=nquad, variables={'$strongs': str(strongs_number)})
except Exception as e:
logging.error('At editing strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | @app.put('/strongs/{strongs_number}', status_code=200, tags=['Strongs Number', 'WRITE'])
def edit_strongs(strongs_number: int, key_values: List[StrongsPropertyValue]=Body(...)):
' '
logging.info(('input args strongs_number: %s, key_values: %s' % (strongs_number, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=strong_node_query, nquad=nquad, variables={'$strongs': str(strongs_number)})
except Exception as e:
logging.error('At editing strongs numbers')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ')<|docstring|>Update a property value of selected strongs number node<|endoftext|> |
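A request sketch for the update endpoint above. The StrongsPropertyValue model is not shown in this record, so the body field names below ('property', 'value') are assumptions based on how prop.property and prop.value are used in the handler:

import requests

BASE = 'http://localhost:8000'                                                # placeholder
payload = [{'property': 'definition', 'value': 'revised definition text'}]    # assumed request schema
resp = requests.put(f'{BASE}/strongs/25', json=payload)
print(resp.status_code, resp.text)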
84e6cb1950c63924172906659a8aa91a11f76551471b0ca841a4161964333300 | @app.post('/strongs', status_code=201, tags=['WRITE', 'Strongs Number'])
def add_strongs():
'creates a strongs dictionary.\n\t Collects strongs data from mysql DB and add to graph \n\t '
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At MySql DB connection')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
tablename = 'Greek_Strongs_Lexicon'
nodename = 'Greek Strongs'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid: %s' % dict_node_uid))
cursor.execute((('Select ID, Pronunciation, Lexeme, Transliteration, Definition, StrongsNumber, EnglishWord from ' + tablename) + ' order by ID'))
count_for_test = 0
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
strongID = next_row[0]
Pronunciation = next_row[1]
Lexeme = next_row[2]
Transliteration = next_row[3]
Definition = next_row[4]
StrongsNumberExtended = next_row[5]
EnglishWord = next_row[6]
strong_node = {'dgraph.type': 'StrongsNode', 'StrongsNumber': strongID, 'pronunciation': Pronunciation, 'lexeme': Lexeme, 'transliteration': Transliteration, 'definition': Definition, 'strongsNumberExtended': StrongsNumberExtended, 'englishWord': EnglishWord, 'belongsTo': {'uid': dict_node_uid}}
try:
strong_node_uid = graph_conn.create_data(strong_node)
except Exception as e:
logging.error('At strong node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('strong_node_uid: %s' % strong_node_uid))
cursor.close()
db.close()
return {'msg': 'Added to graph'} | creates a strongs dictionary.
Collects strongs data from mysql DB and add to graph | dgraph/dGraph_fastAPI_server.py | add_strongs | kavitharaju/vachan-graph | 3 | python | @app.post('/strongs', status_code=201, tags=['WRITE', 'Strongs Number'])
def add_strongs():
'creates a strongs dictionary.\n\t Collects strongs data from mysql DB and add to graph \n\t '
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At MySql DB connection')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
tablename = 'Greek_Strongs_Lexicon'
nodename = 'Greek Strongs'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid: %s' % dict_node_uid))
cursor.execute((('Select ID, Pronunciation, Lexeme, Transliteration, Definition, StrongsNumber, EnglishWord from ' + tablename) + ' order by ID'))
count_for_test = 0
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
strongID = next_row[0]
Pronunciation = next_row[1]
Lexeme = next_row[2]
Transliteration = next_row[3]
Definition = next_row[4]
StrongsNumberExtended = next_row[5]
EnglishWord = next_row[6]
strong_node = {'dgraph.type': 'StrongsNode', 'StrongsNumber': strongID, 'pronunciation': Pronunciation, 'lexeme': Lexeme, 'transliteration': Transliteration, 'definition': Definition, 'strongsNumberExtended': StrongsNumberExtended, 'englishWord': EnglishWord, 'belongsTo': {'uid': dict_node_uid}}
try:
strong_node_uid = graph_conn.create_data(strong_node)
except Exception as e:
logging.error('At strong node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('strong_node_uid: %s' % strong_node_uid))
cursor.close()
db.close()
return {'msg': 'Added to graph'} | @app.post('/strongs', status_code=201, tags=['WRITE', 'Strongs Number'])
def add_strongs():
'creates a strongs dictionary.\n\t Collects strongs data from mysql DB and add to graph \n\t '
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At MySql DB connection')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
tablename = 'Greek_Strongs_Lexicon'
nodename = 'Greek Strongs'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid: %s' % dict_node_uid))
cursor.execute((('Select ID, Pronunciation, Lexeme, Transliteration, Definition, StrongsNumber, EnglishWord from ' + tablename) + ' order by ID'))
count_for_test = 0
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
strongID = next_row[0]
Pronunciation = next_row[1]
Lexeme = next_row[2]
Transliteration = next_row[3]
Definition = next_row[4]
StrongsNumberExtended = next_row[5]
EnglishWord = next_row[6]
strong_node = {'dgraph.type': 'StrongsNode', 'StrongsNumber': strongID, 'pronunciation': Pronunciation, 'lexeme': Lexeme, 'transliteration': Transliteration, 'definition': Definition, 'strongsNumberExtended': StrongsNumberExtended, 'englishWord': EnglishWord, 'belongsTo': {'uid': dict_node_uid}}
try:
strong_node_uid = graph_conn.create_data(strong_node)
except Exception as e:
logging.error('At strong node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('strong_node_uid: %s' % strong_node_uid))
cursor.close()
db.close()
return {'msg': 'Added to graph'}<|docstring|>creates a strongs dictionary.
Collects strongs data from mysql DB and add to graph<|endoftext|> |
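A trigger sketch for the loader above; the endpoint takes no body and reads the Greek_Strongs_Lexicon table from a MySQL server reachable by the API process, so that database is a prerequisite:

import requests

BASE = 'http://localhost:8000'              # placeholder
resp = requests.post(f'{BASE}/strongs')     # data comes from the server-side MySQL table, not the request
print(resp.json())                          # {'msg': 'Added to graph'} on success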
1be8feeb0d9d35bfada8ada23645b0b9d40baa1c71868427e9314a79665cbdf2 | @app.get('/translationwords', status_code=200, tags=['READ', 'Translation Words'])
def get_translationwords(translation_word: Optional[str]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of Translation word nodes and their property values.\n\tIf Translation word is sepcified, its properties and occurances are returned.\n\tIf Translation word is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all Translation words in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the Translation words.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not translation_word) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_tw_query, {'$dummy': ''})
elif translation_word:
query_res = graph_conn.query_data(tw_link_query, {'$tw': translation_word})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(tw_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching translation words')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['tw'])
result['translationWords'] = query_res['tw'][(skip + 1):limit]
for (i, tw) in enumerate(result['translationWords']):
if ('occurances' in tw):
occurs = []
for occur in tw['occurances']:
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['translationWords'][i]['occurances'] = occurs
if ('translationWord' in tw):
link = ('%s/translationwords?translation_word=%s' % (base_URL, tw['translationWord']))
result['translationWords'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
return result | Get the list of Translation word nodes and their property values.
If Translation word is specified, its properties and occurrences are returned.
If Translation word is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)
is provided, lists all Translation words in that verse, with their property values and positions (as per the Greek bible).
If neither of the first two query params are provided, it lists all the Translation words.
Number of items returned can be set using the skip and limit parameters. | dgraph/dGraph_fastAPI_server.py | get_translationwords | kavitharaju/vachan-graph | 3 | python | @app.get('/translationwords', status_code=200, tags=['READ', 'Translation Words'])
def get_translationwords(translation_word: Optional[str]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of Translation word nodes and their property values.\n\tIf Translation word is sepcified, its properties and occurances are returned.\n\tIf Translation word is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all Translation words in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the Translation words.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not translation_word) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_tw_query, {'$dummy': ''})
elif translation_word:
query_res = graph_conn.query_data(tw_link_query, {'$tw': translation_word})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(tw_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching translation words')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['tw'])
result['translationWords'] = query_res['tw'][(skip + 1):limit]
for (i, tw) in enumerate(result['translationWords']):
if ('occurances' in tw):
occurs = []
for occur in tw['occurances']:
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['translationWords'][i]['occurances'] = occurs
if ('translationWord' in tw):
link = ('%s/translationwords?translation_word=%s' % (base_URL, tw['translationWord']))
result['translationWords'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
return result | @app.get('/translationwords', status_code=200, tags=['READ', 'Translation Words'])
def get_translationwords(translation_word: Optional[str]=None, bbbcccvvv: Optional[str]=Query(None, regex='^\\w\\w\\w\\d\\d\\d\\d\\d\\d'), skip: Optional[int]=None, limit: Optional[int]=None):
' Get the list of Translation word nodes and their property values.\n\tIf Translation word is sepcified, its properties and occurances are returned.\n\tIf Translation word is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)\n\tis provided, lists all Translation words in that verse, with their property values and positions(as per Gree bible).\n\tIf neither of the first two query params are provided, it lists all the Translation words.\n\tNumber of items returned can be set using the skip and limit parameters.'
result = {}
try:
if ((not translation_word) and (not bbbcccvvv)):
query_res = graph_conn.query_data(all_tw_query, {'$dummy': ''})
elif translation_word:
query_res = graph_conn.query_data(tw_link_query, {'$tw': translation_word})
logging.info(('query_res: %s' % query_res))
else:
variables = {'$book': str(book_num_map[bbbcccvvv[:3].lower()]), '$chap': bbbcccvvv[3:6], '$ver': bbbcccvvv[(- 3):]}
logging.info(('variables: %s' % variables))
query_res = graph_conn.query_data(tw_in_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching translation words')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['tw'])
result['translationWords'] = query_res['tw'][(skip + 1):limit]
for (i, tw) in enumerate(result['translationWords']):
if ('occurances' in tw):
occurs = []
for occur in tw['occurances']:
verse_link = ('%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s' % (base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position']))
occurs.append(urllib.parse.quote(verse_link, safe='/:-'))
result['translationWords'][i]['occurances'] = occurs
if ('translationWord' in tw):
link = ('%s/translationwords?translation_word=%s' % (base_URL, tw['translationWord']))
result['translationWords'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
return result<|docstring|>Get the list of Translation word nodes and their property values.
If Translation word is specified, its properties and occurrences are returned.
If Translation word is not present and bbbcccvvv(bbb- 3 letter bookcode, ccc- chapter number in 3 digits, vvv- verse number in 3 digits)
is provided, lists all Translation words in that verse, with their property values and positions (as per the Greek bible).
If neither of the first two query params are provided, it lists all the Translation words.
Number of items returned can be set using the skip and limit parameters.<|endoftext|> |
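The translation-word endpoint above follows the same query patterns; the word and reference below are placeholders (the book code must be a key of the server's book_num_map):

import requests

BASE = 'http://localhost:8000'                                                            # placeholder
by_word = requests.get(f'{BASE}/translationwords', params={'translation_word': 'grace'})  # placeholder word
in_verse = requests.get(f'{BASE}/translationwords', params={'bbbcccvvv': 'jhn003016'})    # John 3:16
print(by_word.json()['translationWords'])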
4f1e5226810a0776710c35ba2217c801e54f5e24ac2f5397b256e64934ed1a86 | @app.put('/translationwords/{translation_word}', status_code=200, tags=['WRITE', 'Translation Words'])
def edit_translationwords(translation_word: str, key_values: List[TwPropertyValue]=Body(...)):
' Update a property value of selected Translation word'
logging.info(('input args translation_word: %s, key_values: %s' % (translation_word, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=tw_node_query, nquad=nquad, variables={'$tw': translation_word})
except Exception as e:
logging.error('At editing translation word')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | Update a property value of selected Translation word | dgraph/dGraph_fastAPI_server.py | edit_translationwords | kavitharaju/vachan-graph | 3 | python | @app.put('/translationwords/{translation_word}', status_code=200, tags=['WRITE', 'Translation Words'])
def edit_translationwords(translation_word: str, key_values: List[TwPropertyValue]=Body(...)):
' '
logging.info(('input args translation_word: %s, key_values: %s' % (translation_word, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=tw_node_query, nquad=nquad, variables={'$tw': translation_word})
except Exception as e:
logging.error('At editing translation word')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | @app.put('/translationwords/{translation_word}', status_code=200, tags=['WRITE', 'Translation Words'])
def edit_translationwords(translation_word: str, key_values: List[TwPropertyValue]=Body(...)):
' '
logging.info(('input args translation_word: %s, key_values: %s' % (translation_word, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=tw_node_query, nquad=nquad, variables={'$tw': translation_word})
except Exception as e:
logging.error('At editing translation word')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ')<|docstring|>Update a property value of selected Translation word<|endoftext|> |
7e049584c3634722bc4b4e5719bea7e8e793a62e739108d007cb23381ffdab8e | @app.post('/translationwords', status_code=201, tags=['WRITE', 'Translation Words'])
def add_translationwords():
'creates a translation word dictionary.\n\t Collects tw data from CSV file and adds to graph \n\t '
tw_path = 'Resources/translationWords/tws.csv'
nodename = 'translation words'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid:%s' % dict_node_uid))
count_for_test = 0
with open(tw_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
for row in csv_reader:
count_for_test += 1
sl_no = row[0]
tw = row[1]
Type = row[2]
word_forms = row[3].split(',')
description = row[4]
tw_node = {'dgraph.type': 'TWNode', 'translationWord': tw, 'slNo': sl_no, 'twType': Type, 'description': description, 'belongsTo': {'uid': dict_node_uid}}
if (len(word_forms) > 0):
tw_node['wordForms'] = word_forms
try:
tw_node_uid = graph_conn.create_data(tw_node)
except Exception as e:
logging.error('At tw node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('tw_node_uid:%s' % tw_node_uid))
return {'msg': 'Added to graph'} | creates a translation word dictionary.
Collects tw data from CSV file and adds to graph | dgraph/dGraph_fastAPI_server.py | add_translationwords | kavitharaju/vachan-graph | 3 | python | @app.post('/translationwords', status_code=201, tags=['WRITE', 'Translation Words'])
def add_translationwords():
'creates a translation word dictionary.\n\t Collects tw data from CSV file and adds to graph \n\t '
tw_path = 'Resources/translationWords/tws.csv'
nodename = 'translation words'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid:%s' % dict_node_uid))
count_for_test = 0
with open(tw_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
for row in csv_reader:
count_for_test += 1
sl_no = row[0]
tw = row[1]
Type = row[2]
word_forms = row[3].split(',')
description = row[4]
tw_node = {'dgraph.type': 'TWNode', 'translationWord': tw, 'slNo': sl_no, 'twType': Type, 'description': description, 'belongsTo': {'uid': dict_node_uid}}
if (len(word_forms) > 0):
tw_node['wordForms'] = word_forms
try:
tw_node_uid = graph_conn.create_data(tw_node)
except Exception as e:
logging.error('At tw node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('tw_node_uid:%s' % tw_node_uid))
return {'msg': 'Added to graph'} | @app.post('/translationwords', status_code=201, tags=['WRITE', 'Translation Words'])
def add_translationwords():
'creates a translation word dictionary.\n\t Collects tw data from CSV file and adds to graph \n\t '
tw_path = 'Resources/translationWords/tws.csv'
nodename = 'translation words'
dict_node = {'dictionary': nodename, 'dgraph.type': 'DictionaryNode'}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('dict_node_uid:%s' % dict_node_uid))
count_for_test = 0
with open(tw_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
for row in csv_reader:
count_for_test += 1
sl_no = row[0]
tw = row[1]
Type = row[2]
word_forms = row[3].split(',')
description = row[4]
tw_node = {'dgraph.type': 'TWNode', 'translationWord': tw, 'slNo': sl_no, 'twType': Type, 'description': description, 'belongsTo': {'uid': dict_node_uid}}
if (len(word_forms) > 0):
tw_node['wordForms'] = word_forms
try:
tw_node_uid = graph_conn.create_data(tw_node)
except Exception as e:
logging.error('At tw node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('tw_node_uid:%s' % tw_node_uid))
return {'msg': 'Added to graph'}<|docstring|>creates a translation word dictionary.
Collects tw data from CSV file and adds to graph<|endoftext|> |
87d0fafdfc9813105a8499d3e9eaea8b2e7c4fc92017d8d5f9c58ea5e1b19c84 | @app.get('/bibles', status_code=200, tags=['READ', 'Bible Contents'])
def get_bibles(bible_name: Optional[str]=None, language: Optional[str]=None, skip: Optional[int]=None, limit: Optional[int]=None):
' fetches bibles nodes, properties and available books. \n\tIf no query params are given, all bibles in graph are fetched.\n\tIf bible_name is specified, only that node is returned.\n\tIf only language if given, all bible nodes, and details vavailable in that language is returned\n\tNumber of items returned can be set using the skip and limit parameters.\n\t'
result = {}
try:
if ((not bible_name) and (not language)):
query_res = graph_conn.query_data(all_bibles_query, {'$dummy': ''})
elif bible_name:
query_res = graph_conn.query_data(bible_name_query, {'$bib': bible_name})
logging.info(('query_res: %s' % query_res))
else:
query_res = graph_conn.query_data(bible_lang_query, {'$lang': language})
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching Bibles')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['bibles'])
result['bibles'] = query_res['bibles'][(skip + 1):limit]
return result | fetches bibles nodes, properties and available books.
If no query params are given, all bibles in graph are fetched.
If bible_name is specified, only that node is returned.
If only language is given, all bible nodes and details available in that language are returned.
Number of items returned can be set using the skip and limit parameters. | dgraph/dGraph_fastAPI_server.py | get_bibles | kavitharaju/vachan-graph | 3 | python | @app.get('/bibles', status_code=200, tags=['READ', 'Bible Contents'])
def get_bibles(bible_name: Optional[str]=None, language: Optional[str]=None, skip: Optional[int]=None, limit: Optional[int]=None):
' fetches bibles nodes, properties and available books. \n\tIf no query params are given, all bibles in graph are fetched.\n\tIf bible_name is specified, only that node is returned.\n\tIf only language if given, all bible nodes, and details vavailable in that language is returned\n\tNumber of items returned can be set using the skip and limit parameters.\n\t'
result = {}
try:
if ((not bible_name) and (not language)):
query_res = graph_conn.query_data(all_bibles_query, {'$dummy': ''})
elif bible_name:
query_res = graph_conn.query_data(bible_name_query, {'$bib': bible_name})
logging.info(('query_res: %s' % query_res))
else:
query_res = graph_conn.query_data(bible_lang_query, {'$lang': language})
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching Bibles')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['bibles'])
result['bibles'] = query_res['bibles'][(skip + 1):limit]
return result | @app.get('/bibles', status_code=200, tags=['READ', 'Bible Contents'])
def get_bibles(bible_name: Optional[str]=None, language: Optional[str]=None, skip: Optional[int]=None, limit: Optional[int]=None):
' fetches bibles nodes, properties and available books. \n\tIf no query params are given, all bibles in graph are fetched.\n\tIf bible_name is specified, only that node is returned.\n\tIf only language if given, all bible nodes, and details vavailable in that language is returned\n\tNumber of items returned can be set using the skip and limit parameters.\n\t'
result = {}
try:
if ((not bible_name) and (not language)):
query_res = graph_conn.query_data(all_bibles_query, {'$dummy': ''})
elif bible_name:
query_res = graph_conn.query_data(bible_name_query, {'$bib': bible_name})
logging.info(('query_res: %s' % query_res))
else:
query_res = graph_conn.query_data(bible_lang_query, {'$lang': language})
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching Bibles')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('skip: %s, limit %s' % (skip, limit)))
if (not skip):
skip = (- 1)
if (not limit):
limit = len(query_res['bibles'])
result['bibles'] = query_res['bibles'][(skip + 1):limit]
return result<|docstring|>fetches bibles nodes, properties and available books.
If no query params are given, all bibles in graph are fetched.
If bible_name is specified, only that node is returned.
If only language is given, all bible nodes and details available in that language are returned.
Number of items returned can be set using the skip and limit parameters.<|endoftext|> |
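A fetch sketch for the bible listing endpoint above (base URL and filter values are placeholders):

import requests

BASE = 'http://localhost:8000'                                                              # placeholder
all_bibles = requests.get(f'{BASE}/bibles').json()
by_name = requests.get(f'{BASE}/bibles', params={'bible_name': 'Hindi IRV4 bible'}).json()
by_lang = requests.get(f'{BASE}/bibles', params={'language': 'Hindi', 'limit': 5}).json()
print(by_lang['bibles'])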
2fb834d1bb79d1cce7c674b73d61da90eb6ab8a043e0a5530ab3868a4ff81e08 | @app.put('/bibles/{bible_name}', status_code=200, tags=['WRITE', 'Bible Contents'])
def edit_bible(bible_name: str, key_values: List[BiblePropertyValue]):
' Update a property value of selected bible node'
logging.info(('input args bible_name: %s, key_values: %s' % (bible_name, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=bible_node_query, nquad=nquad, variables={'$bib': bible_name})
except Exception as e:
logging.error('At editing Bible ')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | Update a property value of selected bible node | dgraph/dGraph_fastAPI_server.py | edit_bible | kavitharaju/vachan-graph | 3 | python | @app.put('/bibles/{bible_name}', status_code=200, tags=['WRITE', 'Bible Contents'])
def edit_bible(bible_name: str, key_values: List[BiblePropertyValue]):
' '
logging.info(('input args bible_name: %s, key_values: %s' % (bible_name, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=bible_node_query, nquad=nquad, variables={'$bib': bible_name})
except Exception as e:
logging.error('At editing Bible ')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ') | @app.put('/bibles/{bible_name}', status_code=200, tags=['WRITE', 'Bible Contents'])
def edit_bible(bible_name: str, key_values: List[BiblePropertyValue]):
' '
logging.info(('input args bible_name: %s, key_values: %s' % (bible_name, key_values)))
nquad = ''
for prop in key_values:
nquad += ('uid(u) <%s> "%s" .\n' % (prop.property.value, prop.value))
logging.info(('nquad: %s' % nquad))
try:
graph_conn.upsert(query=bible_node_query, nquad=nquad, variables={'$bib': bible_name})
except Exception as e:
logging.error('At editing Bible ')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
raise HTTPException(status_code=503, detail='Not implemented properly. ')<|docstring|>Update a property value of selected bible node<|endoftext|> |
fdcf295d22e97ec44af1e98b35983cc96022078aa74b31cc6f87c6a5cc6e594c | def normalize_unicode(text, form='NFKC'):
'to normalize text contents before adding them to DB'
return unicodedata.normalize(form, text) | to normalize text contents before adding them to DB | dgraph/dGraph_fastAPI_server.py | normalize_unicode | kavitharaju/vachan-graph | 3 | python | def normalize_unicode(text, form='NFKC'):
return unicodedata.normalize(form, text) | def normalize_unicode(text, form='NFKC'):
return unicodedata.normalize(form, text)<|docstring|>to normalize text contents before adding them to DB<|endoftext|> |
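A quick illustration of what the NFKC default does in the helper above:

import unicodedata

def normalize_unicode(text, form='NFKC'):
    return unicodedata.normalize(form, text)

print(normalize_unicode('\ufb01re'))                # 'fire' -- the 'fi' ligature is expanded
print(normalize_unicode('e\u0301') == '\u00e9')     # True -- combining acute composes to 'é'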
3652a01ef2cf11d971aee0745601deff14dda0bf6d72b4e662c62d03a52ee60a | def parse_usfm(usfm_string):
'converts an uploaded usfm text to a JSON using usfm-grammar'
if isinstance(usfm_string, bytes):
usfm_string = usfm_string.decode('UTF-8')
file = open('temp.usfm', 'w')
file.write(usfm_string)
file.close()
process = subprocess.Popen(['/usr/bin/usfm-grammar temp.usfm --level=relaxed --filter=scripture'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdout, stderr) = process.communicate()
if stderr:
raise Exception(stderr.decode('utf-8'))
usfm_json = json.loads(stdout.decode('utf-8'))
return usfm_json | converts an uploaded usfm text to a JSON using usfm-grammar | dgraph/dGraph_fastAPI_server.py | parse_usfm | kavitharaju/vachan-graph | 3 | python | def parse_usfm(usfm_string):
if isinstance(usfm_string, bytes):
usfm_string = usfm_string.decode('UTF-8')
file = open('temp.usfm', 'w')
file.write(usfm_string)
file.close()
process = subprocess.Popen(['/usr/bin/usfm-grammar temp.usfm --level=relaxed --filter=scripture'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdout, stderr) = process.communicate()
if stderr:
raise Exception(stderr.decode('utf-8'))
usfm_json = json.loads(stdout.decode('utf-8'))
return usfm_json | def parse_usfm(usfm_string):
if isinstance(usfm_string, bytes):
usfm_string = usfm_string.decode('UTF-8')
file = open('temp.usfm', 'w')
file.write(usfm_string)
file.close()
process = subprocess.Popen(['/usr/bin/usfm-grammar temp.usfm --level=relaxed --filter=scripture'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(stdout, stderr) = process.communicate()
if stderr:
raise Exception(stderr.decode('utf-8'))
usfm_json = json.loads(stdout.decode('utf-8'))
return usfm_json<|docstring|>converts an uploaded usfm text to a JSON using usfm-grammar<|endoftext|> |
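A call sketch for the parser above; it shells out to the usfm-grammar CLI, so that binary must exist at /usr/bin/usfm-grammar, and the USFM snippet here is only a minimal placeholder that may need adjusting for the grammar's relaxed mode. It also assumes parse_usfm (defined above) is in scope.

minimal_usfm = '\\id JHN\n\\c 1\n\\p\n\\v 1 In the beginning was the Word.'
book_json = parse_usfm(minimal_usfm)       # usfm-grammar JSON with the 'scripture' filter
print(book_json['book']['bookCode'])       # 'JHN' (shape as used by add_bible_usfm below)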
6ca0d162616850ad7ebc91af95576e59ac63067acff90bb3a1a1646cc72b4a6e | @app.post('/bibles/usfm', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible_usfm(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), usfm_file: UploadFile=File(...)):
'Processes the usfm and adds contents to corresponding bible(creates new bible if not present already)'
usfm = usfm_file.file.read()
connect_Graph()
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
book_json = parse_usfm(usfm)
book_code = book_json['book']['bookCode'].upper()
book_num = book_num_map[book_code.upper()]
variables = {'$bib': bib_node_uid, '$book': book_code}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': book_code, 'bookNumber': book_num, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
for chapter in book_json['chapters']:
chapter_num = chapter['chapterNumber']
variables = {'$book': bookNode_uid, '$chap': str(chapter_num)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=500, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': chapter_num, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
for content in chapter['contents']:
if ('verseNumber' in content):
verse_num = content['verseNumber']
verse_text = content['verseText']
ref_string = ((((book_code + ' ') + str(chapter_num)) + ':') + str(verse_num))
variables = {'$chapter': chapNode_uid, '$verse': str(verse_num)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': verse_num, 'refString': ref_string, 'verseText': verse_text, 'belongsTo': {'uid': chapNode_uid}}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At creating chapter node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
clean_text = re.sub(punct_pattern, ' ', verse_text)
words = re.split('\\s+', clean_text)
for (i, word) in enumerate(words):
wordNode = {'dgraph.type': 'WordNode', 'word': word, 'belongsTo': {'uid': verseNode_uid}, 'position': i}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
return {'message': 'usfm added'} | Processes the usfm and adds contents to corresponding bible(creates new bible if not present already) | dgraph/dGraph_fastAPI_server.py | add_bible_usfm | kavitharaju/vachan-graph | 3 | python | @app.post('/bibles/usfm', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible_usfm(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), usfm_file: UploadFile=File(...)):
usfm = usfm_file.file.read()
connect_Graph()
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
book_json = parse_usfm(usfm)
book_code = book_json['book']['bookCode'].upper()
book_num = book_num_map[book_code.upper()]
variables = {'$bib': bib_node_uid, '$book': book_code}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': book_code, 'bookNumber': book_num, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
for chapter in book_json['chapters']:
chapter_num = chapter['chapterNumber']
variables = {'$book': bookNode_uid, '$chap': str(chapter_num)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': chapter_num, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
for content in chapter['contents']:
if ('verseNumber' in content):
verse_num = content['verseNumber']
verse_text = content['verseText']
ref_string = ((((book_code + ' ') + str(chapter_num)) + ':') + str(verse_num))
variables = {'$chapter': chapNode_uid, '$verse': str(verse_num)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': verse_num, 'refString': ref_string, 'verseText': verse_text, 'belongsTo': {'uid': chapNode_uid}}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At fetching verse node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
clean_text = re.sub(punct_pattern, ' ', verse_text)
words = re.split('\\s+', clean_text)
for (i, word) in enumerate(words):
wordNode = {'dgraph.type': 'WordNode', 'word': word, 'belongsTo': {'uid': verseNode_uid}, 'position': i}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
return {'message': 'usfm added'} | @app.post('/bibles/usfm', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible_usfm(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), usfm_file: UploadFile=File(...)):
usfm = usfm_file.file.read()
connect_Graph()
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
book_json = parse_usfm(usfm)
book_code = book_json['book']['bookCode'].upper()
book_num = book_num_map[book_code.upper()]
variables = {'$bib': bib_node_uid, '$book': book_code}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': book_code, 'bookNumber': book_num, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
for chapter in book_json['chapters']:
chapter_num = chapter['chapterNumber']
variables = {'$book': bookNode_uid, '$chap': str(chapter_num)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': chapter_num, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
for content in chapter['contents']:
if ('verseNumber' in content):
verse_num = content['verseNumber']
verse_text = content['verseText']
ref_string = ((((book_code + ' ') + str(chapter_num)) + ':') + str(verse_num))
variables = {'$chapter': chapNode_uid, '$verse': str(verse_num)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': verse_num, 'refString': ref_string, 'verseText': verse_text, 'belongsTo': {'uid': chapNode_uid}}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At fetching verse node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
clean_text = re.sub(punct_pattern, ' ', verse_text)
words = re.split('\\s+', clean_text)
for (i, word) in enumerate(words):
wordNode = {'dgraph.type': 'WordNode', 'word': word, 'belongsTo': {'uid': verseNode_uid}, 'position': i}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
return {'message': 'usfm added'}<|docstring|>Processes the usfm and adds contents to corresponding bible(creates new bible if not present already)<|endoftext|> |
c7de5a8e6ff9b3d8adea3622992741e8aa07c2689499832c72ea1983077bd03d | @app.post('/bibles', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), tablename: str=Body('Hin_IRV4_BibleWord'), bookcode: BibleBook=Body(BibleBook.mat)):
' create a bible node, fetches contents from specified table in MySQL DB and adds to Graph.\n\tCurrently the API is implemented to add only one book at a time. \n\tThis is due to the amount of time required.'
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
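# Stream word-level rows for the requested book from the MySQL source tables and mirror them as graph nodes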
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At connecting to MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
try:
if (bible_name == 'Grk UGNT4 bible'):
Morph_sequence = ['Role', 'Type', 'Mood', 'Tense', 'Voice', 'Person', 'Case', 'Gender', 'Number', 'Degree']
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, Strongs, Morph, Pronunciation, TW, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID = %s order by LID, Position'), book_num_map[bookcode.value])
else:
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID=%s order by LID, Position'), book_num_map[bookcode.value])
except Exception as e:
logging.error('At fetching data from MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
count_for_test = 0
chapNode = None
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
LID = next_row[0]
Position = next_row[1]
Word = next_row[2]
BookNum = next_row[3]
Chapter = next_row[4]
Verse = next_row[5]
BookName = next_row[6]
book_code = next_row[(- 1)]
if (bible_name == 'Grk UGNT4 bible'):
Strongs = next_row[7]
Morph = next_row[8].split(',')
Pronunciation = next_row[9]
TW_fullString = next_row[10]
logging.info(((((('Book,Chapter,Verse:' + str(BookNum)) + ',') + str(Chapter)) + ',') + str(Verse)))
variables = {'$bib': bib_node_uid, '$book': BookName}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': BookName, 'bookNumber': BookNum, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
variables = {'$book': bookNode_uid, '$chap': str(Chapter)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': Chapter, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
variables = {'$chapter': chapNode_uid, '$verse': str(Verse)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': Verse, 'refString': ((((book_code + ' ') + str(Chapter)) + ':') + str(Verse)), 'belongsTo': {'uid': chapNode_uid}, 'lid': LID}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At fetching verse node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
wordNode = {'dgraph.type': 'WordNode', 'word': Word, 'belongsTo': {'uid': verseNode_uid}, 'position': Position}
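# For the Greek UGNT bible, enrich the word with pronunciation, morphology, a Strongs link and a translation-word link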
if (bible_name == 'Grk UGNT4 bible'):
wordNode['pronunciation'] = Pronunciation
for (key, value) in zip(Morph_sequence, Morph):
if (value != ''):
wordNode[key] = value
variables = {'$strongnum': str(Strongs)}
try:
strongNode_query_res = graph_conn.query_data(strongNode_query, variables)
except Exception as e:
logging.error('At fetching strong node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('strongNode_query_res: %s', strongNode_query_res)
if (len(strongNode_query_res['strongs']) > 0):
strongNode_uid = strongNode_query_res['strongs'][0]['uid']
wordNode['strongsLink'] = {'uid': strongNode_uid}
if (TW_fullString != '-'):
(Type, word) = TW_fullString.split('/')[(- 2):]
variables = {'$word': word}
try:
twNode_query_res = graph_conn.query_data(twNode_query, variables)
except Exception as e:
logging.error('At fetching tw node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(twNode_query_res['tw']) > 0):
twNode_uid = twNode_query_res['tw'][0]['uid']
wordNode['twLink'] = {'uid': twNode_uid}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
cursor.close()
db.close()
text_tablename = tablename.replace('BibleWord', 'Text')
if (text_tablename == 'Grk_UGNT4_Text'):
text_tablename = 'Grk_UGNT_Text'
add_verseTextToBible(bib_node_uid, text_tablename, bookcode.value)
return {'msg': ('Added %s in %s' % (bookcode, bible_name))} | create a bible node, fetches contents from specified table in MySQL DB and adds to Graph.
Currently the API is implemented to add only one book at a time.
This is due to the amount of time required. | dgraph/dGraph_fastAPI_server.py | add_bible | kavitharaju/vachan-graph | 3 | python | @app.post('/bibles', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), tablename: str=Body('Hin_IRV4_BibleWord'), bookcode: BibleBook=Body(BibleBook.mat)):
' create a bible node, fetches contents from specified table in MySQL DB and adds to Graph.\n\tCurrently the API is implemented to add only one book at a time. \n\tThis is due to the amount of time required.'
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At connecting to MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
try:
if (bible_name == 'Grk UGNT4 bible'):
Morph_sequence = ['Role', 'Type', 'Mood', 'Tense', 'Voice', 'Person', 'Case', 'Gender', 'Number', 'Degree']
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, Strongs, Morph, Pronunciation, TW, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID = %s order by LID, Position'), book_num_map[bookcode.value])
else:
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID=%s order by LID, Position'), book_num_map[bookcode.value])
except Exception as e:
logging.error('At fetching data from MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
count_for_test = 0
chapNode = None
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
LID = next_row[0]
Position = next_row[1]
Word = next_row[2]
BookNum = next_row[3]
Chapter = next_row[4]
Verse = next_row[5]
BookName = next_row[6]
book_code = next_row[(- 1)]
if (bible_name == 'Grk UGNT4 bible'):
Strongs = next_row[7]
Morph = next_row[8].split(',')
Pronunciation = next_row[9]
TW_fullString = next_row[10]
logging.info(((((('Book,Chapter,Verse:' + str(BookNum)) + ',') + str(Chapter)) + ',') + str(Verse)))
variables = {'$bib': bib_node_uid, '$book': BookName}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': BookName, 'bookNumber': BookNum, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
variables = {'$book': bookNode_uid, '$chap': str(Chapter)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': Chapter, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
variables = {'$chapter': chapNode_uid, '$verse': str(Verse)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': Verse, 'refString': ((((book_code + ' ') + str(Chapter)) + ':') + str(Verse)), 'belongsTo': {'uid': chapNode_uid}, 'lid': LID}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At fetching verse node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
wordNode = {'dgraph.type': 'WordNode', 'word': Word, 'belongsTo': {'uid': verseNode_uid}, 'position': Position}
if (bible_name == 'Grk UGNT4 bible'):
wordNode['pronunciation'] = Pronunciation
for (key, value) in zip(Morph_sequence, Morph):
if (value != ''):
wordNode[key] = value
variables = {'$strongnum': str(Strongs)}
try:
strongNode_query_res = graph_conn.query_data(strongNode_query, variables)
except Exception as e:
logging.error('At fetching strong node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('strongNode_query_res: %s', strongNode_query_res)
if (len(strongNode_query_res['strongs']) > 0):
strongNode_uid = strongNode_query_res['strongs'][0]['uid']
wordNode['strongsLink'] = {'uid': strongNode_uid}
if (TW_fullString != '-'):
(Type, word) = TW_fullString.split('/')[(- 2):]
variables = {'$word': word}
try:
twNode_query_res = graph_conn.query_data(twNode_query, variables)
except Exception as e:
logging.error('At fetching tw node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(twNode_query_res['tw']) > 0):
twNode_uid = twNode_query_res['tw'][0]['uid']
wordNode['twLink'] = {'uid': twNode_uid}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
cursor.close()
db.close()
text_tablename = tablename.replace('BibleWord', 'Text')
if (text_tablename == 'Grk_UGNT4_Text'):
text_tablename = 'Grk_UGNT_Text'
add_verseTextToBible(bib_node_uid, text_tablename, bookcode.value)
return {'msg': ('Added %s in %s' % (bookcode, bible_name))} | @app.post('/bibles', status_code=200, tags=['WRITE', 'Bible Contents'])
def add_bible(bible_name: str=Body('Hindi IRV4 bible'), language: str=Body('Hindi'), version: str=Body('IRV4'), tablename: str=Body('Hin_IRV4_BibleWord'), bookcode: BibleBook=Body(BibleBook.mat)):
' create a bible node, fetches contents from specified table in MySQL DB and adds to Graph.\n\tCurrently the API is implemented to add only one book at a time. \n\tThis is due to the amount of time required.'
try:
bibNode_query_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
except Exception as e:
logging.error('At fetching Bible uid')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bibNode_query_res['bible']) == 0):
bib_node = {'dgraph.type': 'BibleNode', 'bible': bible_name, 'language': language, 'version': str(version)}
try:
bib_node_uid = graph_conn.create_data(bib_node)
logging.info(('bib_node_uid: %s' % bib_node_uid))
except Exception as e:
logging.error('At creating Bible node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bibNode_query_res['bible']) > 1):
logging.error('At fetching Bible uid')
logging.error('matched multiple bible nodes')
raise HTTPException(status_code=500, detail=('Graph side error. ' + ' matched multiple bible nodes'))
else:
bib_node_uid = bibNode_query_res['bible'][0]['uid']
try:
db = pymysql.connect(host='localhost', database=rel_db_name, user='root', password='password', charset='utf8mb4')
cursor = db.cursor(pymysql.cursors.SSCursor)
except Exception as e:
logging.error('At connecting to MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
try:
if (bible_name == 'Grk UGNT4 bible'):
Morph_sequence = ['Role', 'Type', 'Mood', 'Tense', 'Voice', 'Person', 'Case', 'Gender', 'Number', 'Degree']
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, Strongs, Morph, Pronunciation, TW, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID = %s order by LID, Position'), book_num_map[bookcode.value])
else:
cursor.execute((('Select LID, Position, Word, Map.Book, Chapter, Verse,lookup.Book, lookup.Code from ' + tablename) + ' JOIN Bcv_LidMap as Map ON LID=Map.ID JOIN Bible_Book_Lookup as lookup ON lookup.ID=Map.Book where lookup.ID=%s order by LID, Position'), book_num_map[bookcode.value])
except Exception as e:
logging.error('At fetching data from MYSQL')
logging.error(e)
raise HTTPException(status_code=502, detail=('MySQL side error. ' + str(e)))
count_for_test = 0
chapNode = None
while True:
next_row = cursor.fetchone()
if (not next_row):
break
count_for_test += 1
LID = next_row[0]
Position = next_row[1]
Word = next_row[2]
BookNum = next_row[3]
Chapter = next_row[4]
Verse = next_row[5]
BookName = next_row[6]
book_code = next_row[(- 1)]
if (bible_name == 'Grk UGNT4 bible'):
Strongs = next_row[7]
Morph = next_row[8].split(',')
Pronunciation = next_row[9]
TW_fullString = next_row[10]
logging.info(((((('Book,Chapter,Verse:' + str(BookNum)) + ',') + str(Chapter)) + ',') + str(Verse)))
variables = {'$bib': bib_node_uid, '$book': BookName}
try:
bookNode_query_res = graph_conn.query_data(bookNode_query, variables)
except Exception as e:
logging.error('At fetching book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(bookNode_query_res['book']) == 0):
bookNode = {'dgraph.type': 'BookNode', 'book': BookName, 'bookNumber': BookNum, 'belongsTo': {'uid': bib_node_uid}}
try:
bookNode_uid = graph_conn.create_data(bookNode)
except Exception as e:
logging.error('At creating book node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(bookNode_query_res['book']) > 1):
logging.error('At fetching book node')
logging.error('Matched multiple book nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple book nodes')
else:
bookNode_uid = bookNode_query_res['book'][0]['uid']
variables = {'$book': bookNode_uid, '$chap': str(Chapter)}
try:
chapNode_query_res = graph_conn.query_data(chapNode_query, variables)
except Exception as e:
logging.error('At fetching chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(chapNode_query_res['chapter']) == 0):
chapNode = {'dgraph.type': 'ChapterNode', 'chapter': Chapter, 'belongsTo': {'uid': bookNode_uid}}
try:
chapNode_uid = graph_conn.create_data(chapNode)
except Exception as e:
logging.error('At creating chapter node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(chapNode_query_res['chapter']) > 1):
logging.error('At fetching chapter node')
logging.error('Matched multiple chapter nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple chapter nodes')
else:
chapNode_uid = chapNode_query_res['chapter'][0]['uid']
variables = {'$chapter': chapNode_uid, '$verse': str(Verse)}
try:
verseNode_query_res = graph_conn.query_data(verseNode_query, variables)
except Exception as e:
logging.error('At fetching verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(verseNode_query_res['verse']) == 0):
verseNode = {'dgraph.type': 'VerseNode', 'verse': Verse, 'refString': ((((book_code + ' ') + str(Chapter)) + ':') + str(Verse)), 'belongsTo': {'uid': chapNode_uid}, 'lid': LID}
try:
verseNode_uid = graph_conn.create_data(verseNode)
except Exception as e:
logging.error('At creating verse node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(verseNode_query_res['verse']) > 1):
logging.error('At fetching verse node')
logging.error('Matched multiple verse nodes')
raise HTTPException(status_code=500, detail='Graph side error. Matched multiple verse nodes')
else:
verseNode_uid = verseNode_query_res['verse'][0]['uid']
wordNode = {'dgraph.type': 'WordNode', 'word': Word, 'belongsTo': {'uid': verseNode_uid}, 'position': Position}
if (bible_name == 'Grk UGNT4 bible'):
wordNode['pronunciation'] = Pronunciation
for (key, value) in zip(Morph_sequence, Morph):
if (value != ''):
wordNode[key] = value
variables = {'$strongnum': str(Strongs)}
try:
strongNode_query_res = graph_conn.query_data(strongNode_query, variables)
except Exception as e:
logging.error('At fetching strong node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('strongNode_query_res: %s', strongNode_query_res)
if (len(strongNode_query_res['strongs']) > 0):
strongNode_uid = strongNode_query_res['strongs'][0]['uid']
wordNode['strongsLink'] = {'uid': strongNode_uid}
if (TW_fullString != '-'):
(Type, word) = TW_fullString.split('/')[(- 2):]
variables = {'$word': word}
try:
twNode_query_res = graph_conn.query_data(twNode_query, variables)
except Exception as e:
logging.error('At fetching tw node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (len(twNode_query_res['tw']) > 0):
twNode_uid = twNode_query_res['tw'][0]['uid']
wordNode['twLink'] = {'uid': twNode_uid}
try:
wordNode_uid = graph_conn.create_data(wordNode)
except Exception as e:
logging.error('At creating word node')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info(('wordNode_uid:%s' % wordNode_uid))
cursor.close()
db.close()
text_tablename = tablename.replace('BibleWord', 'Text')
if (text_tablename == 'Grk_UGNT4_Text'):
text_tablename = 'Grk_UGNT_Text'
add_verseTextToBible(bib_node_uid, text_tablename, bookcode.value)
return {'msg': ('Added %s in %s' % (bookcode, bible_name))}<|docstring|>create a bible node, fetches contents from specified table in MySQL DB and adds to Graph.
Currently the API is implemented to add only one book at a time.
This is due to the amount of time required.<|endoftext|> |
5329c53ae053be68a58e13572d0e1074a9c2fc5ec7d62bdfe015ed28e391853d | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}', status_code=200, tags=['READ', 'Bible Contents'])
def get_whole_chapter(bible_name: str, bookcode: BibleBook, chapter: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter)}
query_res = graph_conn.query_data(whole_chapter_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['chapter'][0]['~belongsTo'][0]['~belongsTo'][0]
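# Attach REST links for translation-word and Strongs entries to each word of each verse, where available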
for (j, ver) in enumerate(result['verses']):
for (i, wrd) in enumerate(ver['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['verses'][j]['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['verses'][j]['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing chapter contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | fetches all verses of the chapter
including their strong number, tw and bible name connections | dgraph/dGraph_fastAPI_server.py | get_whole_chapter | kavitharaju/vachan-graph | 3 | python | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}', status_code=200, tags=['READ', 'Bible Contents'])
def get_whole_chapter(bible_name: str, bookcode: BibleBook, chapter: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter)}
query_res = graph_conn.query_data(whole_chapter_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['chapter'][0]['~belongsTo'][0]['~belongsTo'][0]
for (j, ver) in enumerate(result['verses']):
for (i, wrd) in enumerate(ver['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['verses'][j]['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['verses'][j]['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing chapter contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}', status_code=200, tags=['READ', 'Bible Contents'])
def get_whole_chapter(bible_name: str, bookcode: BibleBook, chapter: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter)}
query_res = graph_conn.query_data(whole_chapter_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['chapter'][0]['~belongsTo'][0]['~belongsTo'][0]
for (j, ver) in enumerate(result['verses']):
for (i, wrd) in enumerate(ver['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['verses'][j]['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['verses'][j]['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing chapter contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result<|docstring|>fetches all verses of the chapter
including their strong number, tw and bible name connections<|endoftext|> |
26a59acf1c58ba0b91a219dbcddfebc8a032498c72c3afc03f5580a922a0ce6d | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}', status_code=200, tags=['READ', 'Bible Contents'])
def get_one_verse(bible_name: str, bookcode: BibleBook, chapter: int, verse: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse)}
query_res = graph_conn.query_data(one_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | fetches all verses of the chapter
including their strong number, tw and bible name connections | dgraph/dGraph_fastAPI_server.py | get_one_verse | kavitharaju/vachan-graph | 3 | python | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}', status_code=200, tags=['READ', 'Bible Contents'])
def get_one_verse(bible_name: str, bookcode: BibleBook, chapter: int, verse: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse)}
query_res = graph_conn.query_data(one_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}', status_code=200, tags=['READ', 'Bible Contents'])
def get_one_verse(bible_name: str, bookcode: BibleBook, chapter: int, verse: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse)}
query_res = graph_conn.query_data(one_verse_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result<|docstring|>fetches all verses of the chapter
including their strong number, tw and bible name connections<|endoftext|> |
690a50c826410c121d27be2f0c8dd3f423d99d871b0afbd9524d720afe12a960 | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}/words/{position}', status_code=200, tags=['READ', 'Bible Contents'])
def get_verse_word(bible_name: str, bookcode: BibleBook, chapter: int, verse: int, position: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse), '$pos': str(position)}
query_res = graph_conn.query_data(word_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['word'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | fetches all verses of the chapter
including their strong number, tw and bible name connections | dgraph/dGraph_fastAPI_server.py | get_verse_word | kavitharaju/vachan-graph | 3 | python | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}/words/{position}', status_code=200, tags=['READ', 'Bible Contents'])
def get_verse_word(bible_name: str, bookcode: BibleBook, chapter: int, verse: int, position: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse), '$pos': str(position)}
query_res = graph_conn.query_data(word_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['word'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result | @app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}/words/{position}', status_code=200, tags=['READ', 'Bible Contents'])
def get_verse_word(bible_name: str, bookcode: BibleBook, chapter: int, verse: int, position: int):
' fetches all verses of the chapter \n\tincluding their strong number, tw and bible name connections\n\t'
result = {}
try:
variables = {'$bib': bible_name, '$book': str(book_num_map[bookcode]), '$chapter': str(chapter), '$verse': str(verse), '$pos': str(position)}
query_res = graph_conn.query_data(word_query, variables)
logging.info(('query_res: %s' % query_res))
except Exception as e:
logging.error('At fetching chapter contents')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
result = query_res['word'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]
for (i, wrd) in enumerate(result['words']):
if ('translationWord' in wrd):
link = ('%s/translationwords?translation_word=%s' % (base_URL, wrd['translationWord']))
result['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')
if ('strongsNumber' in wrd):
link = ('%s/strongs?strongs_number=%s' % (base_URL, wrd['strongsNumber']))
result['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')
except Exception as e:
logging.error('At parsing verse contents')
logging.error(e)
raise HTTPException(status_code=404, detail='Requested content not Available. ')
return result<|docstring|>fetches all verses of the chapter
including their strong number, tw and bible name connections<|endoftext|> |
b4595fda4c8f817822e370f25ac5886695a709f82996789adab4ad41a662e1cb | @app.post('/names', status_code=201, tags=['WRITE', 'Bible Names'])
def add_names():
'creates a Bible names dictionary.\n\t* Pass I: Collect names from factgrid, ubs and wiki files and add to dictionary.\n\t* Pass II: Connect the names to each other based on known relations\n\t* Pass III: Connects names to each other using "sameAs" relation \n\t* Pass IV: Connects names to bible Words in English ULB bible\n\t '
nodename = 'Bible Names'
variables = {'$dict': nodename}
dict_node_query_result = graph_conn.query_data(dict_node_query, variables)
if (len(dict_node_query_result['dict']) == 0):
dict_node = {'dgraph.type': 'DictionaryNode', 'dictionary': nodename}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(dict_node_query_result['dict']) == 1):
dict_node_uid = dict_node_query_result['dict'][0]['uid']
else:
logging.error('At dict node fetch')
logging.error('More than one node matched')
raise HTTPException(status_code=502, detail='Graph side error. More than one node matched')
logging.info(('dict_node_uid: %s' % dict_node_uid))
factgrid_file = open('Resources/BibleNames/factgrid_person_query.json', 'r').read()
factgrid_names = json.loads(factgrid_file)
wiki_file = open('Resources/BibleNames/wiki_person_query.json', 'r').read()
wiki_names = json.loads(wiki_file)
ubs_names = get_nt_ot_names_from_ubs()
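# Pass I merges person records from FactGrid, UBS and Wikidata into the Bible Names dictionary as NameNodes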
logging.info('Pass I: Adding names to dictionary')
for name in factgrid_names:
external_uid = name['Person']
label = name['PersonLabel']
desc = ''
if (',' in label):
(label1, label2) = label.split(',', 1)
label = label1
desc = (label2 + '. ')
name_node = {'dgraph.type': 'NameNode', 'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('PersonDescription' in name):
desc += name['PersonDescription']
if (desc != ''):
name_node['description'] = desc.strip()
if ('GenderLabel' in name):
name_node['gender'] = name['GenderLabel']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
label = name['name']
name_node = {'dgraph.type': 'NameNode', 'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('description' in name):
name_node['description'] = name['description'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
label = name['itemLabel']
name_node = {'dgraph.type': 'NameNode', 'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('itemDescription' in name):
name_node['description'] = name['itemDescription'].strip()
if ('gender' in name):
name_node['gender'] = name['gender'].strip()
if ('birthdate' in name):
name_node['birthdate'] = name['birthdate'].strip()
if ('deathdate' in name):
name_node['deathdate'] = name['deathdate'].strip()
if ('birthPlaceLabel' in name):
name_node['birthPlace'] = name['birthPlaceLabel'].strip()
if ('deathPlaceLabel' in name):
name_node['deathPlace'] = name['deathPlaceLabel'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass II: connecting names via known relations')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('Father' in name):
father_external_uid = name['Father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('Mother' in name):
mother_external_uid = name['Mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('father' in name):
father_external_uid = name['father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('mother' in name):
mother_external_uid = name['mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('spouse' in name):
spouse_external_uid = name['spouse']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': spouse_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
spouse_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['spouse'] = {'uid': spouse_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % spouse_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node) or ('spouse' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass III: Connecting names via sameAs relations based on manually connected data')
connection_file = open('Resources/BibleNames/connected_ne.json').read()
connections = json.loads(connection_file)
factgrid_id_pattern = 'https://database.factgrid.de/entity/'
wiki_id_pattern = 'http://www.wikidata.org/entity/'
ubs_id_pattern = 'ubs_name/'
for conn in connections:
if (conn['linked'] != 'manual'):
continue
ids = []
if ('factgrid' in conn):
f_ids = set(conn['factgrid'])
ids += [(factgrid_id_pattern + id) for id in f_ids]
if ('ubs' in conn):
u_ids = set(conn['ubs'])
ids += [(ubs_id_pattern + id) for id in u_ids]
if ('wiki' in conn):
w_ids = set(conn['wiki'])
ids += [(wiki_id_pattern + id) for id in w_ids]
for (a, b) in itertools.product(ids, ids):
if (a == b):
continue
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': a.strip()})
if (len(name_X_uid_query_res['name']) == 1):
a_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for a_node: %s' % a))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': b.strip()})
if (len(name_X_uid_query_res['name']) == 1):
b_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for b_node: %s' % b))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
sameAs_connection = {'uid': a_node_uid, 'sameAs': {'uid': b_node_uid}}
try:
graph_conn.create_data(sameAs_connection)
except Exception as e:
logging.error('At name connecting via sameAs')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass IV: Connecting names to Bible words')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
ref_pattern = re.compile('(\\d* ?\\w+) (\\d+):?(\\d+)?')
if ('notedLabel' in name):
ref = name['notedLabel']
inconsistant_values = ['Superscript of Psalm 7', 'The General Epistle of Jude', 'New Testament', 'Pilate stone']
try:
ref_obj = re.match(ref_pattern, ref)
book = ref_obj.group(1)
chapter = ref_obj.group(2)
verse = ref_obj.group(3)
except Exception as e:
if (ref in inconsistant_values):
continue
logging.error(('At Parsing Reference:%s' % ref))
logging.error(e)
raise HTTPException(status_code=502, detail=('Regex error. ' + str(e)))
if (verse is None):
verse = 0
variables = {'$bib': 'English ULB bible', '$book': str(book_num_map[book]), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
search_names_cleaned = [name.split(' ', 1)[0].replace(',', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['PersonLabel'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['PersonLabel'], text)))
verse_not_found_count = 0
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
search_names_cleaned = [name.split(' ', 1)[0].replace(';', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if ('occurances' in name):
refs = name['occurances']
for ref in refs:
(book, chapter, verse, pos) = ref
variables = {'$bib': 'English ULB bible', '$book': str(book), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['name'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
verse_not_found_count += 1
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['name'], text)))
return {'msg': 'Added names'} | creates a Bible names dictionary.
* Pass I: Collect names from factgrid, ubs and wiki files and add to dictionary.
* Pass II: Connect the names to each other based on known relations
* Pass III: Connects names to each other using "sameAs" relation
* Pass IV: Connects names to bible Words in English ULB bible | dgraph/dGraph_fastAPI_server.py | add_names | kavitharaju/vachan-graph | 3 | python | @app.post('/names', status_code=201, tags=['WRITE', 'Bible Names'])
def add_names():
'creates a Bible names dictionary.\n\t* Pass I: Collect names from factgrid, ubs and wiki files and add to dictionary.\n\t* Pass II: Connect the names to each other based on known relations\n\t* Pass III: Connects names to each other using "sameAs" relation \n\t* Pass IV: Connects names to bible Words in English ULB bible\n\t '
nodename = 'Bible Names'
variables = {'$dict': nodename}
dict_node_query_result = graph_conn.query_data(dict_node_query, variables)
if (len(dict_node_query_result['dict']) == 0):
dict_node = {'dgraph.type': 'DictionaryNode', 'dictionary': nodename}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(dict_node_query_result['dict']) == 1):
dict_node_uid = dict_node_query_result['dict'][0]['uid']
else:
logging.error('At dict node fetch')
logging.error('More than one node matched')
raise HTTPException(status_code=502, detail='Graph side error. More than one node matched')
logging.info(('dict_node_uid: %s' % dict_node_uid))
factgrid_file = open('Resources/BibleNames/factgrid_person_query.json', 'r').read()
factgrid_names = json.loads(factgrid_file)
wiki_file = open('Resources/BibleNames/wiki_person_query.json', 'r').read()
wiki_names = json.loads(wiki_file)
ubs_names = get_nt_ot_names_from_ubs()
logging.info('Pass I: Adding names to dictionary')
for name in factgrid_names:
external_uid = name['Person']
label = name['PersonLabel']
desc = ''
if (',' in label):
(label1, label2) = label.split(',', 1)
label = label1
desc = (label2 + '. ')
name_node = {'dgraph.type': 'NameNode', 'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('PersonDescription' in name):
desc += name['PersonDescription']
if (desc != ''):
name_node['description'] = desc.strip()
if ('GenderLabel' in name):
name_node['gender'] = name['GenderLabel']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
label = name['name']
name_node = {'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('description' in name):
name_node['description'] = name['description'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
label = name['itemLabel']
name_node = {'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('itemDescription' in name):
name_node['description'] = name['itemDescription'].strip()
if ('gender' in name):
name_node['gender'] = name['gender'].strip()
if ('birthdate' in name):
name_node['birthdate'] = name['birthdate'].strip()
if ('deathdate' in name):
name_node['deathdate'] = name['deathdate'].strip()
if ('birthPlaceLabel' in name):
name_node['birthPlace'] = name['birthPlaceLabel'].strip()
if ('deathPlaceLabel' in name):
name_node['deathPlace'] = name['deathPlaceLabel'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass II: connecting names via known relations')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('Father' in name):
father_external_uid = name['Father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('Mother' in name):
mother_external_uid = name['Mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('father' in name):
father_external_uid = name['father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('mother' in name):
mother_external_uid = name['mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('spouse' in name):
spouse_external_uid = name['spouse']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': spouse_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
spouse_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['spouse'] = {'uid': spouse_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % spouse_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node) or ('spouse' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass III: Connecting names via sameAs relations based on manually connected data')
connection_file = open('Resources/BibleNames/connected_ne.json').read()
connections = json.loads(connection_file)
factgrid_id_pattern = 'https://database.factgrid.de/entity/'
wiki_id_pattern = 'http://www.wikidata.org/entity/'
ubs_id_pattern = 'ubs_name/'
for conn in connections:
if (conn['linked'] != 'manual'):
continue
ids = []
if ('factgrid' in conn):
f_ids = set(conn['factgrid'])
ids += [(factgrid_id_pattern + id) for id in f_ids]
if ('ubs' in conn):
u_ids = set(conn['ubs'])
ids += [(ubs_id_pattern + id) for id in u_ids]
if ('wiki' in conn):
w_ids = set(conn['wiki'])
ids += [(wiki_id_pattern + id) for id in w_ids]
for (a, b) in itertools.product(ids, ids):
if (a == b):
continue
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': a.strip()})
if (len(name_X_uid_query_res['name']) == 1):
a_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for a_node: %s' % a))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': b.strip()})
if (len(name_X_uid_query_res['name']) == 1):
b_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for b_node: %s' % b))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
sameAs_connection = {'uid': a_node_uid, 'sameAs': {'uid': b_node_uid}}
try:
graph_conn.create_data(sameAs_connection)
except Exception as e:
logging.error('At name connecting via sameAs')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass IV: Connecting names to Bible words')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
ref_pattern = re.compile('(\\d* ?\\w+) (\\d+):?(\\d+)?')
if ('notedLabel' in name):
ref = name['notedLabel']
inconsistant_values = ['Superscript of Psalm 7', 'The General Epistle of Jude', 'New Testament', 'Pilate stone']
try:
ref_obj = re.match(ref_pattern, ref)
book = ref_obj.group(1)
chapter = ref_obj.group(2)
verse = ref_obj.group(3)
except Exception as e:
if (ref in inconsistant_values):
continue
logging.error(('At Parsing Reference:%s' % ref))
logging.error(e)
raise HTTPException(status_code=502, detail=('Regex error. ' + str(e)))
if (verse is None):
verse = 0
variables = {'$bib': 'English ULB bible', '$book': str(book_num_map[book]), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
search_names_cleaned = [name.split(' ', 1)[0].replace(',', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['PersonLabel'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['PersonLabel'], text)))
verse_not_found_count = 0
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
search_names_cleaned = [name.split(' ', 1)[0].replace(';', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if ('occurances' in name):
refs = name['occurances']
for ref in refs:
(book, chapter, verse, pos) = ref
variables = {'$bib': 'English ULB bible', '$book': str(book), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['name'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
verse_not_found_count += 1
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['name'], text)))
return {'msg': 'Added names'} | @app.post('/names', status_code=201, tags=['WRITE', 'Bible Names'])
def add_names():
'creates a Bible names dictionary.\n\t* Pass I: Collect names from factgrid, ubs and wiki files and add to dictionary.\n\t* Pass II: Connect the names to each other based on known relations\n\t* Pass III: Connects names to each other using "sameAs" relation \n\t* Pass IV: Connects names to bible Words in English ULB bible\n\t '
nodename = 'Bible Names'
variables = {'$dict': nodename}
dict_node_query_result = graph_conn.query_data(dict_node_query, variables)
if (len(dict_node_query_result['dict']) == 0):
dict_node = {'dgraph.type': 'DictionaryNode', 'dictionary': nodename}
try:
dict_node_uid = graph_conn.create_data(dict_node)
except Exception as e:
logging.error('At dict node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
elif (len(dict_node_query_result['dict']) == 1):
dict_node_uid = dict_node_query_result['dict'][0]['uid']
else:
logging.error('At dict node fetch')
logging.error('More than one node matched')
raise HTTPException(status_code=502, detail='Graph side error. More than one node matched')
logging.info(('dict_node_uid: %s' % dict_node_uid))
factgrid_file = open('Resources/BibleNames/factgrid_person_query.json', 'r').read()
factgrid_names = json.loads(factgrid_file)
wiki_file = open('Resources/BibleNames/wiki_person_query.json', 'r').read()
wiki_names = json.loads(wiki_file)
ubs_names = get_nt_ot_names_from_ubs()
logging.info('Pass I: Adding names to dictionary')
for name in factgrid_names:
external_uid = name['Person']
label = name['PersonLabel']
desc = ''
if (',' in label):
(label1, label2) = label.split(',', 1)
label = label1
desc = (label2 + '. ')
name_node = {'dgraph.type': 'NameNode', 'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('PersonDescription' in name):
desc += name['PersonDescription']
if (desc != ''):
name_node['description'] = desc.strip()
if ('GenderLabel' in name):
name_node['gender'] = name['GenderLabel']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
label = name['name']
name_node = {'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('description' in name):
name_node['description'] = name['description'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
label = name['itemLabel']
name_node = {'externalUid': external_uid, 'name': label, 'belongsTo': {'uid': dict_node_uid}}
if ('itemDescription' in name):
name_node['description'] = name['itemDescription'].strip()
if ('gender' in name):
name_node['gender'] = name['gender'].strip()
if ('birthdate' in name):
name_node['birthdate'] = name['birthdate'].strip()
if ('deathdate' in name):
name_node['deathdate'] = name['deathdate'].strip()
if ('birthPlaceLabel' in name):
name_node['birthPlace'] = name['birthPlaceLabel'].strip()
if ('deathPlaceLabel' in name):
name_node['deathPlace'] = name['deathPlaceLabel'].strip()
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) > 0):
logging.warn('Skipping name node creation')
logging.warn(('Name already exists\nNew name node: %s\nExisting node: %s' % (name_node, name_X_uid_query_res['name'][0])))
else:
name_node_uid = graph_conn.create_data(name_node)
logging.info(('name: %s, name_node_uid: %s' % (label, name_node_uid)))
except Exception as e:
logging.error('At name node creation')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass II: connecting names via known relations')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('Father' in name):
father_external_uid = name['Father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('Mother' in name):
mother_external_uid = name['Mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
for name in wiki_names:
external_uid = name['item']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
name_node = {'uid': name_node_uid}
if ('father' in name):
father_external_uid = name['father']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': father_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
father_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['father'] = {'uid': father_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % father_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('mother' in name):
mother_external_uid = name['mother']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': mother_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
mother_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['mother'] = {'uid': mother_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % mother_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if ('spouse' in name):
spouse_external_uid = name['spouse']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': spouse_external_uid})
if (len(name_X_uid_query_res['name']) == 1):
spouse_node_uid = name_X_uid_query_res['name'][0]['uid']
name_node['spouse'] = {'uid': spouse_node_uid}
else:
logging.warn('At name node fetching')
logging.warn(('Name node not found: %s' % spouse_external_uid))
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
if (('father' in name_node) or ('mother' in name_node) or ('spouse' in name_node)):
try:
graph_conn.create_data(name_node)
except Exception as e:
logging.error('At name connecting')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass III: Connecting names via sameAs relations based on manually connected data')
connection_file = open('Resources/BibleNames/connected_ne.json').read()
connections = json.loads(connection_file)
factgrid_id_pattern = 'https://database.factgrid.de/entity/'
wiki_id_pattern = 'http://www.wikidata.org/entity/'
ubs_id_pattern = 'ubs_name/'
for conn in connections:
if (conn['linked'] != 'manual'):
continue
ids = []
if ('factgrid' in conn):
f_ids = set(conn['factgrid'])
ids += [(factgrid_id_pattern + id) for id in f_ids]
if ('ubs' in conn):
u_ids = set(conn['ubs'])
ids += [(ubs_id_pattern + id) for id in u_ids]
if ('wiki' in conn):
w_ids = set(conn['wiki'])
ids += [(wiki_id_pattern + id) for id in w_ids]
for (a, b) in itertools.product(ids, ids):
if (a == b):
continue
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': a.strip()})
if (len(name_X_uid_query_res['name']) == 1):
a_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for a_node: %s' % a))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': b.strip()})
if (len(name_X_uid_query_res['name']) == 1):
b_node_uid = name_X_uid_query_res['name'][0]['uid']
else:
logging.warn('At fetching name nodes')
logging.warn(('cannot find one node for b_node: %s' % b))
logging.warn(('got query result: %s' % name_X_uid_query_res))
continue
except Exception as e:
logging.error('At fetching name nodes')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
sameAs_connection = {'uid': a_node_uid, 'sameAs': {'uid': b_node_uid}}
try:
graph_conn.create_data(sameAs_connection)
except Exception as e:
logging.error('At name connecting via sameAs')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
logging.info('Pass IV: Connecting names to Bible words')
for name in factgrid_names:
external_uid = name['Person']
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
ref_pattern = re.compile('(\\d* ?\\w+) (\\d+):?(\\d+)?')
if ('notedLabel' in name):
ref = name['notedLabel']
inconsistant_values = ['Superscript of Psalm 7', 'The General Epistle of Jude', 'New Testament', 'Pilate stone']
try:
ref_obj = re.match(ref_pattern, ref)
book = ref_obj.group(1)
chapter = ref_obj.group(2)
verse = ref_obj.group(3)
except Exception as e:
if (ref in inconsistant_values):
continue
logging.error(('At Parsing Reference:%s' % ref))
logging.error(e)
raise HTTPException(status_code=502, detail=('Regex error. ' + str(e)))
if (verse is None):
verse = 0
variables = {'$bib': 'English ULB bible', '$book': str(book_num_map[book]), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
search_names_cleaned = [name.split(' ', 1)[0].replace(',', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['PersonLabel'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['PersonLabel'], text)))
verse_not_found_count = 0
for name in ubs_names:
external_uid = ('ubs_name/' + name['id'])
try:
name_X_uid_query_res = graph_conn.query_data(name_X_uid_query, {'$xuid': external_uid})
if (len(name_X_uid_query_res['name']) == 1):
name_node_uid = name_X_uid_query_res['name'][0]['uid']
search_names = [name_X_uid_query_res['name'][0]['name']]
if ('sameAs' in name_X_uid_query_res['name'][0]):
search_names += [same['name'] for same in name_X_uid_query_res['name'][0]['sameAs']]
else:
logging.error('At name node fetching')
logging.error(('Name node not found: %s' % external_uid))
raise HTTPException(status_code=502, detail='Graph side error. Name node not found.')
except Exception as e:
logging.error('At name node fetching')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
search_names_cleaned = [name.split(' ', 1)[0].replace(';', '').lower() for name in search_names]
search_names = set(search_names_cleaned)
if ('occurances' in name):
refs = name['occurances']
for ref in refs:
(book, chapter, verse, pos) = ref
variables = {'$bib': 'English ULB bible', '$book': str(book), '$chapter': str(chapter), '$verse': str(verse)}
try:
one_verse_query_res = graph_conn.query_data(one_verse_query, variables)
except Exception as e:
logging.error(('At fetching words in verse:%s' % variables))
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_verse = False
found_word = False
if (len(one_verse_query_res['verse'][0]) > 0):
if (('~belongsTo' in one_verse_query_res['verse'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
if (('~belongsTo' in one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]) and (len(one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo']) > 0)):
words = one_verse_query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['words']
for wrd in words:
if (re.sub(non_letter_pattern, '', wrd['word'].lower().replace("'s", '')) in search_names):
name_connection = {'uid': wrd['uid'], 'nameLink': {'uid': name_node_uid}}
try:
logging.info(('linking %s to %s' % (name['name'], wrd['word'])))
graph_conn.create_data(name_connection)
pass
except Exception as e:
logging.error('At creating nameLink')
logging.error(e)
raise HTTPException(status_code=502, detail=('Graph side error. ' + str(e)))
found_word = True
found_verse = True
if (not found_verse):
logging.warn(('verse %s not found' % variables))
verse_not_found_count += 1
elif (not found_word):
text = ' '.join([wrd['word'] for wrd in words])
logging.warn(('Matching word not found in the searched verse\n %s >>> %s' % (name['name'], text)))
return {'msg': 'Added names'}<|docstring|>creates a Bible names dictionary.
* Pass I: Collect names from factgrid, ubs and wiki files and add to dictionary.
* Pass II: Connect the names to each other based on known relations
* Pass III: Connects names to each other using "sameAs" relation
* Pass IV: Connects names to bible Words in English ULB bible<|endoftext|> |
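Note on Pass IV above: the linking only works because both sides are normalised the same way — the dictionary label is reduced to its first word, lower-cased and stripped of punctuation, while each Bible word token is lower-cased with the possessive "'s" and non-letter characters removed. A minimal standalone sketch of that comparison follows; the non_letter_pattern regex and the sample labels/tokens are illustrative assumptions, since the module's real pattern is defined outside this excerpt.
import re

# Assumed stand-in for the module-level non_letter_pattern used above;
# it keeps only alphabetic characters.
non_letter_pattern = re.compile(r'[^A-Za-z]')

def normalise_label(label):
    # First word of the dictionary label, commas dropped, lower-cased.
    return label.split(' ', 1)[0].replace(',', '').lower()

def normalise_token(token):
    # Bible word token: drop the possessive "'s" and any non-letter characters.
    return re.sub(non_letter_pattern, '', token.lower().replace("'s", ''))

search_names = {normalise_label(lbl) for lbl in ['Adam', 'Eve, the first woman']}  # sample labels
for token in ['Adam', "Adam's", 'Eve,', 'serpent']:                                # sample verse tokens
    if normalise_token(token) in search_names:
        print('would link', token)  # stands in for graph_conn.create_data(name_connection)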
a4727a61ef56f30bc65e8c24adfcb99031b4b922cfef94a5f7e81f662dcba760 | @app.post('/versification/original', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_orig(versification: dict):
'Create the entire versification structure with the original versification format'
nodename = 'original'
root_node = {'dgraph.type': 'VersificationNode', 'versification': nodename}
root_node_uid = graph_conn.create_data(root_node)
for book in versification['maxVerses']:
book_node = {'dgraph.type': 'VersificationBookNode', 'bookcode': book, 'belongsTo': {'uid': root_node_uid}}
book_node_uid = graph_conn.create_data(book_node)
for (i, chap_max) in enumerate(versification['maxVerses'][book]):
chapter_node = {'dgraph.type': 'VersificationChapterNode', 'chapter': (i + 1), 'belongsTo': {'uid': book_node_uid}}
chapter_node_uid = graph_conn.create_data(chapter_node)
for verse in range(int(chap_max)):
verse_node = {'dgraph.type': 'VersificationVerseNode', 'verseNumber': (verse + 1), 'belongsTo': {'uid': chapter_node_uid}}
verse_node_uid = graph_conn.create_data(verse_node) | Create the entire versification structure with the original versification format | dgraph/dGraph_fastAPI_server.py | add_versification_orig | kavitharaju/vachan-graph | 3 | python | @app.post('/versification/original', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_orig(versification: dict):
nodename = 'original'
root_node = {'dgraph.type': 'VersificationNode', 'versification': nodename}
root_node_uid = graph_conn.create_data(root_node)
for book in versification['maxVerses']:
book_node = {'dgraph.type': 'VersificationBookNode', 'bookcode': book, 'belongsTo': {'uid': root_node_uid}}
book_node_uid = graph_conn.create_data(book_node)
for (i, chap_max) in enumerate(versification['maxVerses'][book]):
chapter_node = {'dgraph.type': 'VersificationChapterNode', 'chapter': (i + 1), 'belongsTo': {'uid': book_node_uid}}
chapter_node_uid = graph_conn.create_data(chapter_node)
for verse in range(int(chap_max)):
verse_node = {'dgraph.type': 'VersificationVerseNode', 'verseNumber': (verse + 1), 'belongsTo': {'uid': chapter_node_uid}}
verse_node_uid = graph_conn.create_data(verse_node) | @app.post('/versification/original', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_orig(versification: dict):
nodename = 'original'
root_node = {'dgraph.type': 'VersificationNode', 'versification': nodename}
root_node_uid = graph_conn.create_data(root_node)
for book in versification['maxVerses']:
book_node = {'dgraph.type': 'VersificationBookNode', 'bookcode': book, 'belongsTo': {'uid': root_node_uid}}
book_node_uid = graph_conn.create_data(book_node)
for (i, chap_max) in enumerate(versification['maxVerses'][book]):
chapter_node = {'dgraph.type': 'VersificationChapterNode', 'chapter': (i + 1), 'belongsTo': {'uid': book_node_uid}}
chapter_node_uid = graph_conn.create_data(chapter_node)
for verse in range(int(chap_max)):
verse_node = {'dgraph.type': 'VersificationVerseNode', 'verseNumber': (verse + 1), 'belongsTo': {'uid': chapter_node_uid}}
verse_node_uid = graph_conn.create_data(verse_node)<|docstring|>Create the entire versification structure with the original versification format<|endoftext|> |
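For reference, the payload that add_versification_orig above walks over keeps per-chapter verse counts under maxVerses, keyed by book code, which is why the endpoint can build the book → chapter → verse node chain with nothing but nested loops. The sketch below shows an assumed, minimal payload (the counts are placeholders, not real versification data) and how many chapter and verse nodes it would create.
# Hypothetical input for add_versification_orig; counts are placeholders.
versification = {
    'maxVerses': {
        'GEN': ['31', '25'],  # chapter 1 -> 31 verses, chapter 2 -> 25 verses
        'JUD': ['25'],        # single-chapter book
    }
}

# One VersificationChapterNode per list entry, one VersificationVerseNode per verse.
chapter_nodes = sum(len(chapters) for chapters in versification['maxVerses'].values())
verse_nodes = sum(int(count) for chapters in versification['maxVerses'].values() for count in chapters)
print(chapter_nodes, verse_nodes)  # 3 chapters, 81 verses for this sample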
f464d341cdd17de2f12e25059b8261d8144c094b43615b709db76f6cfff86a81 | @app.post('/versification/map', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_map(versification: dict, bible_name: str):
'Add maps from verses of selected bible to the original versification structure as per the map'
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
for source_verse in versification['verseMappings']:
versi_verse = versification['verseMappings'][source_verse]
src_vars = process_ref_string(source_verse)
versi_vars = process_ref_string(versi_verse)
for item in src_vars:
item['$bib_uid'] = str(bib_uid)
i = 0
for var1 in src_vars:
var2 = versi_vars[i]
if (i < (len(versi_vars) - 1)):
i = (i + 1)
versi_map_nodes(var1, var2)
var1 = src_vars[(- 1)]
while (i < (len(versi_vars) - 1)):
var2 = versi_vars[i]
versi_map_nodes(var1, var2)
i += 1
for verse in versification['excludedVerses']:
verse_vars = process_ref_string(verse)
for var in verse_vars:
versi_node = graph_conn.query_data(versi_verse_node_query, var)
if (len(versi_node['verse']) < 1):
raise Exception('Cannot find versification node: %s' % var)
mapping = {'uid': str(bib_uid), 'excludedVerse': {'uid': versi_node['verse'][0]['uid']}}
print(mapping)
graph_conn.create_data(mapping)
for verse in versification['partialVerses']:
'if component verses are coming as multiple verse nodes in Graph, \n\t\tadd a "partialVerse" relation from root verse to components'
pass | Add maps from verses of selected bible to the original versification structure as per the map | dgraph/dGraph_fastAPI_server.py | add_versification_map | kavitharaju/vachan-graph | 3 | python | @app.post('/versification/map', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_map(versification: dict, bible_name: str):
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
for source_verse in versification['verseMappings']:
versi_verse = versification['verseMappings'][source_verse]
src_vars = process_ref_string(source_verse)
versi_vars = process_ref_string(versi_verse)
for item in src_vars:
item['$bib_uid'] = str(bib_uid)
i = 0
for var1 in src_vars:
var2 = versi_vars[i]
if (i < (len(versi_vars) - 1)):
i = (i + 1)
versi_map_nodes(var1, var2)
var1 = src_vars[(- 1)]
while (i < (len(versi_vars) - 1)):
var2 = versi_vars[i]
versi_map_nodes(var1, var2)
i += 1
for verse in versification['excludedVerses']:
verse_vars = process_ref_string(verse)
for var in verse_vars:
versi_node = graph_conn.query_data(versi_verse_node_query, var)
if (len(versi_node['verse']) < 1):
raise Exception('Cannot find versification node: %s' % var)
mapping = {'uid': str(bib_uid), 'excludedVerse': {'uid': versi_node['verse'][0]['uid']}}
print(mapping)
graph_conn.create_data(mapping)
for verse in versification['partialVerses']:
'if component verses are coming as multiple verse nodes in Graph, \n\t\tadd a "partialVerse" relation from root verse to components'
pass | @app.post('/versification/map', status_code=201, tags=['WRITE', 'Versification'])
def add_versification_map(versification: dict, bible_name: str):
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
for source_verse in versification['verseMappings']:
versi_verse = versification['verseMappings'][source_verse]
src_vars = process_ref_string(source_verse)
versi_vars = process_ref_string(versi_verse)
for item in src_vars:
item['$bib_uid'] = str(bib_uid)
i = 0
for var1 in src_vars:
var2 = versi_vars[i]
if (i < (len(versi_vars) - 1)):
i = (i + 1)
versi_map_nodes(var1, var2)
var1 = src_vars[(- 1)]
while (i < (len(versi_vars) - 1)):
var2 = versi_vars[i]
versi_map_nodes(var1, var2)
i += 1
for verse in versification['excludedVerses']:
verse_vars = process_ref_string(verse)
for var in verse_vars:
versi_node = graph_conn.query_data(versi_verse_node_query, var)
if (len(versi_node['verse']) < 1):
raise Exception('Cannot find versification node: %s' % var)
mapping = {'uid': str(bib_uid), 'excludedVerse': {'uid': versi_node['verse'][0]['uid']}}
print(mapping)
graph_conn.create_data(mapping)
for verse in versification['partialVerses']:
'if component verses are coming as multiple verse nodes in Graph, \n\t\tadd a "partialVerse" relation from root verse to components'
pass<|docstring|>Add maps from verses of selected bible to the original versification structure as per the map<|endoftext|> |
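The verseMappings consumed by add_versification_map above (and re-emitted by get_versification_map further down) are plain 'BOOK chapter:verse' strings, optionally carrying a verse range, mapping a verse in the uploaded bible to its slot in the original versification. The sketch below is a hypothetical stand-in for the range expansion that process_ref_string (defined elsewhere in this module) is assumed to perform; the payload values are illustrative only.
import re

# Assumed expansion of a 'BOOK chapter:verse[-verse]' reference into one
# query-variable dict per verse; not the module's actual process_ref_string.
ref_range_pattern = re.compile(r'(\w+) (\d+):(\d+)(?:-(\d+))?')

def expand_ref(ref):
    book, chapter, start, end = ref_range_pattern.match(ref).groups()
    for verse in range(int(start), int(end or start) + 1):
        yield {'$book': book, '$chapter': chapter, '$verse': str(verse)}

payload = {
    'verseMappings': {'GEN 31:55': 'GEN 32:1'},  # bible verse -> original versification slot
    'excludedVerses': ['MAT 17:21'],             # verses this bible does not carry
    'partialVerses': {},                         # not yet handled by the endpoint above
}
for src, trg in payload['verseMappings'].items():
    print(list(expand_ref(src)), '->', list(expand_ref(trg)))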
9d65a37f9d20d95785965d5117415ff907e0b292646a6a075656b829a720fc0b | @app.get('/versification/map', status_code=200, tags=['READ', 'Versification'])
def get_versification_map(bible_name: str):
'Gets a text output as given by versification sniffer, if mapping is added for the bible'
versification = {}
versification['maxVerses'] = {}
versification['partialVerses'] = {}
versification['verseMappings'] = {}
versification['excludedVerses'] = []
versification['unexcludedVerses'] = {}
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
verses = graph_conn.query_data(exluded_verses_query, {'$bib_uid': str(bib_uid)})
for ver in verses['verse']:
ref = ('%s %s:%s' % (ver['book'], ver['chapter'], ver['verse']))
versification['excludedVerses'].append(ref)
print(versification['excludedVerses'])
mapped_verses = graph_conn.query_data(verse_mappings_query, {'$bib_uid': str(bib_uid)})
for ver in mapped_verses['verse']:
key = ('%s %s:%s' % (num_book_map[ver['srcBook']], ver['srcChapter'], ver['srcVerse']))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], ver['trgVerse']))
if (key in versification['verseMappings']):
match_obj = re.match(verse_range_pattern, versification['verseMappings'][key])
book = match_obj.group(1)
chapter = match_obj.group(2)
verse_s = match_obj.group(3)
verse_e = match_obj.group(4)
if ((book == ver['trgBook']) and (chapter == ver['trgChapter'])):
if (verse_e is None):
range_ = sorted([int(verse_s), ver['trgVerse']])
else:
range_ = sorted([int(verse_s), int(verse_e), ver['trgVerse']])
sorted_range = ((str(range_[0]) + '-') + str(range_[(- 1)]))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], sorted_range))
else:
val = ((versification['verseMappings'][key] + ', ') + val)
versification['verseMappings'][key] = val
print(versification['verseMappings'])
book_chapters = graph_conn.query_data(maxVerse_query, {'$bib_uid': str(bib_uid)})
for book in book_chapters['struct'][0]['~belongsTo']:
book_code = num_book_map[book['bookNumber']]
book_entry = []
for chap in book['~belongsTo']:
book_entry.append(chap['maxVerse'])
versification['maxVerses'][book_code] = book_entry
print(versification['maxVerses'])
return versification | Gets a text output as given by versification sniffer, if mapping is added for the bible | dgraph/dGraph_fastAPI_server.py | get_versification_map | kavitharaju/vachan-graph | 3 | python | @app.get('/versification/map', status_code=200, tags=['READ', 'Versification'])
def get_versification_map(bible_name: str):
versification = {}
versification['maxVerses'] = {}
versification['partialVerses'] = {}
versification['verseMappings'] = {}
versification['excludedVerses'] = []
versification['unexcludedVerses'] = {}
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
verses = graph_conn.query_data(exluded_verses_query, {'$bib_uid': str(bib_uid)})
for ver in verses['verse']:
ref = ('%s %s:%s' % (ver['book'], ver['chapter'], ver['verse']))
versification['excludedVerses'].append(ref)
print(versification['excludedVerses'])
mapped_verses = graph_conn.query_data(verse_mappings_query, {'$bib_uid': str(bib_uid)})
for ver in mapped_verses['verse']:
key = ('%s %s:%s' % (num_book_map[ver['srcBook']], ver['srcChapter'], ver['srcVerse']))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], ver['trgVerse']))
if (key in versification['verseMappings']):
match_obj = re.match(verse_range_pattern, versification['verseMappings'][key])
book = match_obj.group(1)
chapter = match_obj.group(2)
verse_s = match_obj.group(3)
verse_e = match_obj.group(4)
if ((book == ver['trgBook']) and (chapter == ver['trgChapter'])):
if (verse_e is None):
range_ = sorted([int(verse_s), ver['trgVerse']])
else:
range_ = sorted([int(verse_s), int(verse_e), ver['trgVerse']])
sorted_range = ((str(range_[0]) + '-') + str(range_[(- 1)]))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], sorted_range))
else:
val = ((versification['verseMappings'][key] + ', ') + val)
versification['verseMappings'][key] = val
print(versification['verseMappings'])
book_chapters = graph_conn.query_data(maxVerse_query, {'$bib_uid': str(bib_uid)})
for book in book_chapters['struct'][0]['~belongsTo']:
book_code = num_book_map[book['bookNumber']]
book_entry = []
for chap in book['~belongsTo']:
book_entry.append(chap['maxVerse'])
versification['maxVerses'][book_code] = book_entry
print(versification['maxVerses'])
return versification | @app.get('/versification/map', status_code=200, tags=['READ', 'Versification'])
def get_versification_map(bible_name: str):
versification = {}
versification['maxVerses'] = {}
versification['partialVerses'] = {}
versification['verseMappings'] = {}
versification['excludedVerses'] = []
versification['unexcludedVerses'] = {}
connect_Graph()
bib_res = graph_conn.query_data(bible_uid_query, {'$bib': bible_name})
if (len(bib_res['bible']) < 1):
raise HTTPException(status_code=404, detail=('Bible not found: %s' % bible_name))
bib_uid = bib_res['bible'][0]['uid']
verses = graph_conn.query_data(exluded_verses_query, {'$bib_uid': str(bib_uid)})
for ver in verses['verse']:
ref = ('%s %s:%s' % (ver['book'], ver['chapter'], ver['verse']))
versification['excludedVerses'].append(ref)
print(versification['excludedVerses'])
mapped_verses = graph_conn.query_data(verse_mappings_query, {'$bib_uid': str(bib_uid)})
for ver in mapped_verses['verse']:
key = ('%s %s:%s' % (num_book_map[ver['srcBook']], ver['srcChapter'], ver['srcVerse']))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], ver['trgVerse']))
if (key in versification['verseMappings']):
match_obj = re.match(verse_range_pattern, versification['verseMappings'][key])
book = match_obj.group(1)
chapter = match_obj.group(2)
verse_s = match_obj.group(3)
verse_e = match_obj.group(4)
if ((book == ver['trgBook']) and (chapter == ver['trgChapter'])):
if (verse_e is None):
range_ = sorted([int(verse_s), ver['trgVerse']])
else:
range_ = sorted([int(verse_s), int(verse_e), ver['trgVerse']])
sorted_range = ((str(range_[0]) + '-') + str(range_[(- 1)]))
val = ('%s %s:%s' % (ver['trgBook'], ver['trgChapter'], sorted_range))
else:
val = ((versification['verseMappings'][key] + ', ') + val)
versification['verseMappings'][key] = val
print(versification['verseMappings'])
book_chapters = graph_conn.query_data(maxVerse_query, {'$bib_uid': str(bib_uid)})
for book in book_chapters['struct'][0]['~belongsTo']:
book_code = num_book_map[book['bookNumber']]
book_entry = []
for chap in book['~belongsTo']:
book_entry.append(chap['maxVerse'])
versification['maxVerses'][book_code] = book_entry
print(versification['maxVerses'])
return versification<|docstring|>Gets a text output as given by versification sniffer, if mapping is added for the bible<|endoftext|> |
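A small standalone sketch of the range-merging step inside get_versification_map above. verse_range_pattern is defined elsewhere in dGraph_fastAPI_server.py, so the pattern below is only an assumed stand-in:
import re
# Assumed stand-in for the module-level verse_range_pattern
# (group 1: book, group 2: chapter, group 3: first verse, group 4: optional last verse).
verse_range_pattern = re.compile(r'([0-9A-Z]+) (\d+):(\d+)(?:-(\d+))?')
def merge_into_range(existing, trg_book, trg_chapter, trg_verse):
    # Collapse an existing mapping like 'GEN 1:4' plus verse 6 into 'GEN 1:4-6',
    # mirroring the sorted()/range logic of the endpoint.
    m = verse_range_pattern.match(existing)
    verse_s, verse_e = int(m.group(3)), m.group(4)
    if verse_e is None:
        range_ = sorted([verse_s, int(trg_verse)])
    else:
        range_ = sorted([verse_s, int(verse_e), int(trg_verse)])
    return '%s %s:%s-%s' % (trg_book, trg_chapter, range_[0], range_[-1])
print(merge_into_range('GEN 1:4', 'GEN', '1', 6))  # GEN 1:4-6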
d9e0ff8a7f677e1ce5373b58d783499e2c06bf1b7e767a416c154ccf843e43d1 | @app.get('/versification/verse', status_code=200, tags=['READ', 'Versification'])
def get_verse_map(bookcode: BibleBook, chapter: int, verse: int):
'Gets all verses mapped to the original verse given by bcv.'
connect_Graph()
var = {'$book': bookcode.upper(), '$chapter': str(chapter), '$verse': str(verse)}
mapped_verses = graph_conn.query_data(parallel_versi_verses_query, var)['verse']
res = mapped_verses
mapped_bibles = set([item['bible'] for item in mapped_verses])
var['$book'] = str(book_num_map[bookcode])
parallelverses = graph_conn.query_data(simple_parallel_verses_query, var)['verse']
for ver in parallelverses:
if (ver['bible'] not in mapped_bibles):
res.append(ver)
return res | Gets all verses mapped to the original verse given by bcv. | dgraph/dGraph_fastAPI_server.py | get_verse_map | kavitharaju/vachan-graph | 3 | python | @app.get('/versification/verse', status_code=200, tags=['READ', 'Versification'])
def get_verse_map(bookcode: BibleBook, chapter: int, verse: int):
connect_Graph()
var = {'$book': bookcode.upper(), '$chapter': str(chapter), '$verse': str(verse)}
mapped_verses = graph_conn.query_data(parallel_versi_verses_query, var)['verse']
res = mapped_verses
mapped_bibles = set([item['bible'] for item in mapped_verses])
var['$book'] = str(book_num_map[bookcode])
parallelverses = graph_conn.query_data(simple_parallel_verses_query, var)['verse']
for ver in parallelverses:
if (ver['bible'] not in mapped_bibles):
res.append(ver)
return res | @app.get('/versification/verse', status_code=200, tags=['READ', 'Versification'])
def get_verse_map(bookcode: BibleBook, chapter: int, verse: int):
connect_Graph()
var = {'$book': bookcode.upper(), '$chapter': str(chapter), '$verse': str(verse)}
mapped_verses = graph_conn.query_data(parallel_versi_verses_query, var)['verse']
res = mapped_verses
mapped_bibles = set([item['bible'] for item in mapped_verses])
var['$book'] = str(book_num_map[bookcode])
parallelverses = graph_conn.query_data(simple_parallel_verses_query, var)['verse']
for ver in parallelverses:
if (ver['bible'] not in mapped_bibles):
res.append(ver)
return res<|docstring|>Gets all verses mapped to the original verse given by bcv.<|endoftext|> |
c2b7d7cd92c237f4329c2359036c8e250ec3e4fd25ff1f6303b7b96b0a1e962d | def __iter__(self):
'\n Iterate over all bounding boxes.\n\n Yields\n ------\n BoundingBox\n '
i = 0
while True:
try:
i %= len(self.bounding_boxes)
(yield self.bounding_boxes[i])
i += 1
except ZeroDivisionError:
(yield None) | Iterate over all bounding boxes.
Yields
------
BoundingBox | vision/interface.py | __iter__ | cjhr95/IARC-2020 | 12 | python | def __iter__(self):
'\n Iterate over all bounding boxes.\n\n Yields\n ------\n BoundingBox\n '
i = 0
while True:
try:
i %= len(self.bounding_boxes)
(yield self.bounding_boxes[i])
i += 1
except ZeroDivisionError:
(yield None) | def __iter__(self):
'\n Iterate over all bounding boxes.\n\n Yields\n ------\n BoundingBox\n '
i = 0
while True:
try:
i %= len(self.bounding_boxes)
(yield self.bounding_boxes[i])
i += 1
except ZeroDivisionError:
(yield None)<|docstring|>Iterate over all bounding boxes.
Yields
------
BoundingBox<|endoftext|> |
9b5e7e88d0459ac2342e67cb45800d32949744ef503359952c470ca6579ce9e8 | def update(self, bounding_boxes):
'\n Update environment.\n\n Parameters\n ----------\n bounding_boxes: list[BoundingBox]\n New environment data.\n '
self.bounding_boxes = bounding_boxes | Update environment.
Parameters
----------
bounding_boxes: list[BoundingBox]
New environment data. | vision/interface.py | update | cjhr95/IARC-2020 | 12 | python | def update(self, bounding_boxes):
'\n Update environment.\n\n Parameters\n ----------\n bounding_boxes: list[BoundingBox]\n New environment data.\n '
self.bounding_boxes = bounding_boxes | def update(self, bounding_boxes):
'\n Update environment.\n\n Parameters\n ----------\n bounding_boxes: list[BoundingBox]\n New environment data.\n '
self.bounding_boxes = bounding_boxes<|docstring|>Update environment.
Parameters
----------
bounding_boxes: list[BoundingBox]
New environment data.<|endoftext|> |
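The two methods above belong to a larger class in vision/interface.py that is not shown in this dump; the Environment and BoundingBox classes below are minimal stand-ins, only to show how the endless, wrap-around iteration behaves:
from itertools import islice
class BoundingBox:
    # Bare-bones stand-in for the project's BoundingBox.
    def __init__(self, label):
        self.label = label
class Environment:
    # Minimal container reproducing the update()/__iter__() pair above.
    def __init__(self):
        self.bounding_boxes = []
    def update(self, bounding_boxes):
        self.bounding_boxes = bounding_boxes
    def __iter__(self):
        i = 0
        while True:
            try:
                i %= len(self.bounding_boxes)
                yield self.bounding_boxes[i]
                i += 1
            except ZeroDivisionError:
                yield None  # no boxes yet
env = Environment()
env.update([BoundingBox('roomba'), BoundingBox('pylon')])
print([b.label for b in islice(env, 5)])  # ['roomba', 'pylon', 'roomba', 'pylon', 'roomba']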
47e7c431436ac679de8704855f59db55f3b0743ae36d70e72c1190d81812025a | def main():
' main entry point for module execution\n '
OnyxBufferPoolModule.main() | main entry point for module execution | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/onyx/onyx_buffer_pool.py | main | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def main():
' \n '
OnyxBufferPoolModule.main() | def main():
' \n '
OnyxBufferPoolModule.main()<|docstring|>main entry point for module execution<|endoftext|> |
d5c7177704e803b8dcd8aa95b66e3df1900baafd9742e7c2c9969589f907df61 | def init_module(self):
' initialize module\n '
element_spec = dict(name=dict(type='str', required=True), pool_type=dict(choices=['lossless', 'lossy'], default='lossy'), memory_percent=dict(type='float'), switch_priority=dict(type='int'))
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) | initialize module | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/onyx/onyx_buffer_pool.py | init_module | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def init_module(self):
' \n '
element_spec = dict(name=dict(type='str', required=True), pool_type=dict(choices=['lossless', 'lossy'], default='lossy'), memory_percent=dict(type='float'), switch_priority=dict(type='int'))
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) | def init_module(self):
' \n '
element_spec = dict(name=dict(type='str', required=True), pool_type=dict(choices=['lossless', 'lossy'], default='lossy'), memory_percent=dict(type='float'), switch_priority=dict(type='int'))
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)<|docstring|>initialize module<|endoftext|> |
c08f070d851cbc98dd7cf5df22530ae89254446be22e7605d100e93b7b41dfa2 | def autolabel(rects, ax):
'Attach a text label above each bar in *rects*, displaying its height.'
for rect in rects:
height = rect.get_height()
ax.annotate('{:.0f}'.format(height), xy=((rect.get_x() + (rect.get_width() / 2)), height), xytext=(0, 3), textcoords='offset points', ha='center', va='bottom') | Attach a text label above each bar in *rects*, displaying its height. | functions.py | autolabel | lesh3000/sql_problem | 1 | python | def autolabel(rects, ax):
for rect in rects:
height = rect.get_height()
ax.annotate('{:.0f}'.format(height), xy=((rect.get_x() + (rect.get_width() / 2)), height), xytext=(0, 3), textcoords='offset points', ha='center', va='bottom') | def autolabel(rects, ax):
for rect in rects:
height = rect.get_height()
ax.annotate('{:.0f}'.format(height), xy=((rect.get_x() + (rect.get_width() / 2)), height), xytext=(0, 3), textcoords='offset points', ha='center', va='bottom')<|docstring|>Attach a text label above each bar in *rects*, displaying its height.<|endoftext|> |
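A minimal usage sketch for autolabel() above; the bar heights are invented and the Agg backend is chosen only so the example runs headless:
import matplotlib
matplotlib.use('Agg')  # render off-screen
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
rects = ax.bar(['general', 'slow', 'error'], [120, 45, 7])
autolabel(rects, ax)   # writes 120 / 45 / 7 above the bars
fig.savefig('request_counts.png')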
b694a2c26a3cb907531d96b0dc61b0d0c052d33eeb6fad01bef2b0bf5e99b2dc | def loadLog(filePath):
'Loads json from path and converts to dataframe.\n\n Parameters\n ----------\n filePath : str, required\n \n the path of the file\n\n Returns\n ------\n obj\n \n Pandas DataFrame object\n \n '
dirname = os.path.abspath('')
log = os.path.join(dirname, ('data/' + filePath))
with open(log) as json_file:
data = json.load(json_file)
return pd.json_normalize(data) | Loads json from path and converts to dataframe.
Parameters
----------
filePath : str, required
the path of the file
Returns
------
obj
Pandas DataFrame object | functions.py | loadLog | lesh3000/sql_problem | 1 | python | def loadLog(filePath):
'Loads json from path and converts to dataframe.\n\n Parameters\n ----------\n filePath : str, required\n \n the path of the file\n\n Returns\n ------\n obj\n \n Pandas DataFrame object\n \n '
dirname = os.path.abspath('')
log = os.path.join(dirname, ('data/' + filePath))
with open(log) as json_file:
data = json.load(json_file)
return pd.json_normalize(data) | def loadLog(filePath):
'Loads json from path and converts to dataframe.\n\n Parameters\n ----------\n filePath : str, required\n \n the path of the file\n\n Returns\n ------\n obj\n \n Pandas DataFrame object\n \n '
dirname = os.path.abspath('')
log = os.path.join(dirname, ('data/' + filePath))
with open(log) as json_file:
data = json.load(json_file)
return pd.json_normalize(data)<|docstring|>Loads json from path and converts to dataframe.
Parameters
----------
filePath : str, required
the path of the file
Returns
------
obj
Pandas DataFrame object<|endoftext|> |
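A hypothetical call to loadLog() above; it assumes a file data/general_log.json sits next to functions.py and that os, json and pandas (as pd) are already imported at module level:
df = loadLog('general_log.json')   # reads data/general_log.json relative to the module
print(df.shape)
print(df.columns.tolist())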
2fff0dcb9a8dc97973523a9cc7c0a6db5767729fde1c39e60badcda360a2ecbf | def compareDates(df, df1):
'makes comparisons of timelines available in slow and general logs\n -converts string to float\n -calculates and prints timelines of the logs\n -visualizes logs activities (number of requests per minute) in lineplot for each log\n -views and prints descriptive stats (number of requests per minute) for each log\n -summarizes the above in boxplot\n -calculates the differences between the number of activities recorded in each log\n -plot the above on the scatter plot\n \n Parameters\n ----------\n df : pandas.dataFrame, required\n df1 : pandas.dataFrame, required\n\n '
gen = df['event_time'].map((lambda x: x[:19])).value_counts().sort_index()
slow = df1['start_time'].map((lambda x: x[:19])).value_counts().sort_index()
print('General log timeline is over: ')
print((datetime.datetime.strptime(max(df['event_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df['event_time']), '%Y-%m-%d %H:%M:%S.%f')))
print('_______________________________')
print('Slow log timeline is over: ')
print((datetime.datetime.strptime(max(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f')))
merged = pd.concat([gen, slow], axis=1).fillna(0).sort_index()
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
y = list(range(merged.shape[0]))
logs = ['event_time', 'start_time']
rects1 = ax.plot(y, merged['event_time'], alpha=0.3, label='General log')
rects2 = ax.plot(y, merged['start_time'], alpha=0.3, label='Slow log')
plt.legend(prop={'size': 10}, title='Logs')
plt.title('Requests per minute reported by both logs')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Number of requests')
plt.show()
print(merged[['event_time', 'start_time']].describe())
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
ax.set_title('Number of requests per minute')
ax.boxplot(merged[['event_time', 'start_time']].T)
ax.set_xticklabels(['General Log', 'Slow Log'])
plt.show()
differences = abs((merged['event_time'] - merged['start_time']))
print((('Logs provide ' + str(differences[(differences > 0)].shape[0])) + ' different values regarding number of requests per minute'))
print('__________________________')
plt.figure(figsize=(10, 5))
plt.title('Request number per minute differences')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Difference in requests per minute')
plt.scatter(range(differences.shape[0]), differences)
plt.ylim(bottom=1.0) | makes comparisons of timelines available in slow and general logs
-converts string to float
-calculates and prints timelines of the logs
-visualizes logs activities (number of requests per minute) in lineplot for each log
-views and prints descriptive stats (number of requests per minute) for each log
-summarizes the above in boxplot
-calculates the differences between the number of activities recorded in each log
-plot the above on the scatter plot
Parameters
----------
df : pandas.dataFrame, required
df1 : pandas.dataFrame, required | functions.py | compareDates | lesh3000/sql_problem | 1 | python | def compareDates(df, df1):
'makes comparisons of timelines available in slow and general logs\n -converts string to float\n -calculates and prints timelines of the logs\n -visualizes logs activities (number of requests per minute) in lineplot for each log\n -views and prints descriptive stats (number of requests per minute) for each log\n -summarizes the above in boxplot\n -calculates the differences between the number of activities recorded in each log\n -plot the above on the scatter plot\n \n Parameters\n ----------\n df : pandas.dataFrame, required\n df1 : pandas.dataFrame, required\n\n '
gen = df['event_time'].map((lambda x: x[:19])).value_counts().sort_index()
slow = df1['start_time'].map((lambda x: x[:19])).value_counts().sort_index()
print('General log timeline is over: ')
print((datetime.datetime.strptime(max(df['event_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df['event_time']), '%Y-%m-%d %H:%M:%S.%f')))
print('_______________________________')
print('Slow log timeline is over: ')
print((datetime.datetime.strptime(max(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f')))
merged = pd.concat([gen, slow], axis=1).fillna(0).sort_index()
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
y = list(range(merged.shape[0]))
logs = ['event_time', 'start_time']
rects1 = ax.plot(y, merged['event_time'], alpha=0.3, label='General log')
rects2 = ax.plot(y, merged['start_time'], alpha=0.3, label='Slow log')
plt.legend(prop={'size': 10}, title='Logs')
plt.title('Requests per minute reported by both logs')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Number of requests')
plt.show()
print(merged[['event_time', 'start_time']].describe())
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
ax.set_title('Number of requests per minute')
ax.boxplot(merged[['event_time', 'start_time']].T)
ax.set_xticklabels(['General Log', 'Slow Log'])
plt.show()
differences = abs((merged['event_time'] - merged['start_time']))
print((('Logs provide ' + str(differences[(differences > 0)].shape[0])) + ' different values regarding number of requests per minute'))
print('__________________________')
plt.figure(figsize=(10, 5))
plt.title('Request number per minute differences')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Difference in requests per minute')
plt.scatter(range(differences.shape[0]), differences)
plt.ylim(bottom=1.0) | def compareDates(df, df1):
'makes comparisons of timelines available in slow and general logs\n -converts string to float\n -calculates and prints timelines of the logs\n -visualizes logs activities (number of requests per minute) in lineplot for each log\n -views and prints descriptive stats (number of requests per minute) for each log\n -summarizes the above in boxplot\n -calculates the differences between the number of activities recorded in each log\n -plot the above on the scatter plot\n \n Parameters\n ----------\n df : pandas.dataFrame, required\n df1 : pandas.dataFrame, required\n\n '
gen = df['event_time'].map((lambda x: x[:19])).value_counts().sort_index()
slow = df1['start_time'].map((lambda x: x[:19])).value_counts().sort_index()
print('General log timeline is over: ')
print((datetime.datetime.strptime(max(df['event_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df['event_time']), '%Y-%m-%d %H:%M:%S.%f')))
print('_______________________________')
print('Slow log timeline is over: ')
print((datetime.datetime.strptime(max(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.strptime(min(df1['start_time']), '%Y-%m-%d %H:%M:%S.%f')))
merged = pd.concat([gen, slow], axis=1).fillna(0).sort_index()
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
y = list(range(merged.shape[0]))
logs = ['event_time', 'start_time']
rects1 = ax.plot(y, merged['event_time'], alpha=0.3, label='General log')
rects2 = ax.plot(y, merged['start_time'], alpha=0.3, label='Slow log')
plt.legend(prop={'size': 10}, title='Logs')
plt.title('Requests per minute reported by both logs')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Number of requests')
plt.show()
print(merged[['event_time', 'start_time']].describe())
(fig, ax) = plt.subplots()
fig.set_size_inches(15, 7, forward=True)
ax.set_title('Number of requests per minute')
ax.boxplot(merged[['event_time', 'start_time']].T)
ax.set_xticklabels(['General Log', 'Slow Log'])
plt.show()
differences = abs((merged['event_time'] - merged['start_time']))
print((('Logs provide ' + str(differences[(differences > 0)].shape[0])) + ' different values regarding number of requests per minute'))
print('__________________________')
plt.figure(figsize=(10, 5))
plt.title('Request number per minute differences')
plt.xlabel('Timeline (minutes)')
plt.ylabel('Difference in requests per minute')
plt.scatter(range(differences.shape[0]), differences)
plt.ylim(bottom=1.0)<|docstring|>makes comparisons of timelines available in slow and general logs
-converts string to float
-calculates and prints timelines of the logs
-visualizes logs activities (number of requests per minute) in lineplot for each log
-views and prints descriptive stats (number of requests per minute) for each log
-summarizes the above in boxplot
-calculates the differences between the number of activities recorded in each log
-plot the above on the scatter plot
Parameters
----------
df : pandas.dataFrame, required
df1 : pandas.dataFrame, required<|endoftext|> |
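A small sketch of the requests-per-minute counting used by compareDates() above. Note that slicing the timestamp to 19 characters keeps 'YYYY-MM-DD HH:MM:SS', i.e. one-second buckets; a 16-character slice would give true per-minute buckets:
import pandas as pd
ts = pd.Series(['2021-01-01 10:00:01.100000',
                '2021-01-01 10:00:59.200000',
                '2021-01-01 10:01:03.000000'])
per_second = ts.map(lambda x: x[:19]).value_counts().sort_index()  # what the code above does
per_minute = ts.map(lambda x: x[:16]).value_counts().sort_index()  # per-minute variant
print(per_second)
print(per_minute)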
d4dcfb2a999d72e60b216bd1f76a9569af79c666ba809a8e76c1f76e21371cc3 | def clean(s):
'- Removes text between */ /*\n - removes spaces in front of and behind the string\n\n Parameters\n ----------\n s : str, required\n \n string to preprocess\n\n Returns\n ------\n s: str\n '
s = re.sub('\\s+', ' ', s)
try:
s = re.search('(.*)/(.*)', s).group(2)
except:
pass
return s.strip() | - Removes text between */ /*
- removes spaces in front of and behind the string
Parameters
----------
s : str, required
string to preprocess
Returns
------
s: str | functions.py | clean | lesh3000/sql_problem | 1 | python | def clean(s):
'- Removes text between */ /*\n - removes spaces in front of and behind the string\n\n Parameters\n ----------\n s : str, required\n \n string to preprocess\n\n Returns\n ------\n s: str\n '
s = re.sub('\\s+', ' ', s)
try:
s = re.search('(.*)/(.*)', s).group(2)
except:
pass
return s.strip() | def clean(s):
'- Removes text between */ /*\n - removes spaces in front of and behind the string\n\n Parameters\n ----------\n s : str, required\n \n string to preprocess\n\n Returns\n ------\n s: str\n '
s = re.sub('\\s+', ' ', s)
try:
s = re.search('(.*)/(.*)', s).group(2)
except:
pass
return s.strip()<|docstring|>- Removes text between */ /*
- removes spaces in front of and behind the string
Parameters
----------
s : str, required
string to preprocess
Returns
------
s: str<|endoftext|> |
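Two sample calls to clean() above; the commented outputs follow from the regexes (whitespace is collapsed, and when a '/' is present only the text after the last '/' is kept):
print(clean('  SELECT  *   FROM  logs  '))      # 'SELECT * FROM logs'
print(clean('/* header comment */ SELECT 1'))   # 'SELECT 1'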
1e9831fad0d4b541cdfeadceffaee4b04f8c7ea0c2b1d1f6828800c107d117bc | def getCapitalWords(df):
'Gets words written in capital letters.\n\n Parameters\n ----------\n df : obj, pandas dataframe\n \n\n Returns\n ------\n list :str\n \n list of strings\n '
arr = set()
for i in df:
try:
for u in i.split():
s = ''.join(re.findall('([A-Z])', u))
arr.add(s)
except:
pass
return arr | Gets words written in capital letters.
Parameters
----------
df : obj, pandas dataframe
Returns
------
list :str
list of strings | functions.py | getCapitalWords | lesh3000/sql_problem | 1 | python | def getCapitalWords(df):
'Gets words written in capital letters.\n\n Parameters\n ----------\n df : obj, pandas dataframe\n \n\n Returns\n ------\n list :str\n \n list of strings\n '
arr = set()
for i in df:
try:
for u in i.split():
s = ''.join(re.findall('([A-Z])', u))
arr.add(s)
except:
pass
return arr | def getCapitalWords(df):
'Gets words written in capital letters.\n\n Parameters\n ----------\n df : obj, pandas dataframe\n \n\n Returns\n ------\n list :str\n \n list of strings\n '
arr = set()
for i in df:
try:
for u in i.split():
s = ''.join(re.findall('([A-Z])', u))
arr.add(s)
except:
pass
return arr<|docstring|>Gets words written in capital letters.
Parameters
----------
df : obj, pandas dataframe
Returns
------
list :str
list of strings<|endoftext|> |
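A small call to getCapitalWords() above; it accepts any iterable of strings and collects, per token, the concatenation of that token's capital letters (so the empty string appears for all-lowercase tokens):
print(getCapitalWords(['SELECT name FROM Users']))
# -> {'SELECT', '', 'FROM', 'U'} (set order may vary)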
a39cfd12ce3c763bc6668cd5d1a951ce45d3c66957dae7dfcc59122e32bf1495 | def process_ndex_neighborhood(gene_names, network_id=None, rdf_out='bel_output.rdf', print_output=True):
"Return a BelProcessor for an NDEx network neighborhood.\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search the neighborhood of.\n Example: ['BRAF', 'MAP2K1']\n network_id : Optional[str]\n The UUID of the network in NDEx. By default, the BEL Large Corpus\n network is used.\n rdf_out : Optional[str]\n Name of the output file to save the RDF returned by the web service.\n This is useful for debugging purposes or to repeat the same query\n on an offline RDF file later. Default: bel_output.rdf\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls process_belrdf to the returned RDF string from the\n webservice.\n "
if (network_id is None):
network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
url = (ndex_bel2rdf + ('/network/%s/asBELRDF/query' % network_id))
params = {'searchString': ' '.join(gene_names)}
res_json = ndex_client.send_request(url, params, is_json=True)
if (not res_json):
logger.error('No response for NDEx neighborhood query.')
return None
if res_json.get('error'):
error_msg = res_json.get('message')
logger.error(('BEL/RDF response contains error: %s' % error_msg))
return None
rdf = res_json.get('content')
if (not rdf):
logger.error('BEL/RDF response is empty.')
return None
with open(rdf_out, 'wb') as fh:
fh.write(rdf.encode('utf-8'))
bp = process_belrdf(rdf, print_output=print_output)
return bp | Return a BelProcessor for an NDEx network neighborhood.
Parameters
----------
gene_names : list
A list of HGNC gene symbols to search the neighborhood of.
Example: ['BRAF', 'MAP2K1']
network_id : Optional[str]
The UUID of the network in NDEx. By default, the BEL Large Corpus
network is used.
rdf_out : Optional[str]
Name of the output file to save the RDF returned by the web service.
This is useful for debugging purposes or to repeat the same query
on an offline RDF file later. Default: bel_output.rdf
Returns
-------
bp : BelProcessor
A BelProcessor object which contains INDRA Statements in bp.statements.
Notes
-----
This function calls process_belrdf to the returned RDF string from the
webservice. | indra/bel/bel_api.py | process_ndex_neighborhood | jmuhlich/indra | 0 | python | def process_ndex_neighborhood(gene_names, network_id=None, rdf_out='bel_output.rdf', print_output=True):
"Return a BelProcessor for an NDEx network neighborhood.\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search the neighborhood of.\n Example: ['BRAF', 'MAP2K1']\n network_id : Optional[str]\n The UUID of the network in NDEx. By default, the BEL Large Corpus\n network is used.\n rdf_out : Optional[str]\n Name of the output file to save the RDF returned by the web service.\n This is useful for debugging purposes or to repeat the same query\n on an offline RDF file later. Default: bel_output.rdf\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls process_belrdf to the returned RDF string from the\n webservice.\n "
if (network_id is None):
network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
url = (ndex_bel2rdf + ('/network/%s/asBELRDF/query' % network_id))
params = {'searchString': ' '.join(gene_names)}
res_json = ndex_client.send_request(url, params, is_json=True)
if (not res_json):
logger.error('No response for NDEx neighborhood query.')
return None
if res_json.get('error'):
error_msg = res_json.get('message')
logger.error(('BEL/RDF response contains error: %s' % error_msg))
return None
rdf = res_json.get('content')
if (not rdf):
logger.error('BEL/RDF response is empty.')
return None
with open(rdf_out, 'wb') as fh:
fh.write(rdf.encode('utf-8'))
bp = process_belrdf(rdf, print_output=print_output)
return bp | def process_ndex_neighborhood(gene_names, network_id=None, rdf_out='bel_output.rdf', print_output=True):
"Return a BelProcessor for an NDEx network neighborhood.\n\n Parameters\n ----------\n gene_names : list\n A list of HGNC gene symbols to search the neighborhood of.\n Example: ['BRAF', 'MAP2K1']\n network_id : Optional[str]\n The UUID of the network in NDEx. By default, the BEL Large Corpus\n network is used.\n rdf_out : Optional[str]\n Name of the output file to save the RDF returned by the web service.\n This is useful for debugging purposes or to repeat the same query\n on an offline RDF file later. Default: bel_output.rdf\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls process_belrdf to the returned RDF string from the\n webservice.\n "
if (network_id is None):
network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb'
url = (ndex_bel2rdf + ('/network/%s/asBELRDF/query' % network_id))
params = {'searchString': ' '.join(gene_names)}
res_json = ndex_client.send_request(url, params, is_json=True)
if (not res_json):
logger.error('No response for NDEx neighborhood query.')
return None
if res_json.get('error'):
error_msg = res_json.get('message')
logger.error(('BEL/RDF response contains error: %s' % error_msg))
return None
rdf = res_json.get('content')
if (not rdf):
logger.error('BEL/RDF response is empty.')
return None
with open(rdf_out, 'wb') as fh:
fh.write(rdf.encode('utf-8'))
bp = process_belrdf(rdf, print_output=print_output)
return bp<|docstring|>Return a BelProcessor for an NDEx network neighborhood.
Parameters
----------
gene_names : list
A list of HGNC gene symbols to search the neighborhood of.
Example: ['BRAF', 'MAP2K1']
network_id : Optional[str]
The UUID of the network in NDEx. By default, the BEL Large Corpus
network is used.
rdf_out : Optional[str]
Name of the output file to save the RDF returned by the web service.
This is useful for debugging purposes or to repeat the same query
on an offline RDF file later. Default: bel_output.rdf
Returns
-------
bp : BelProcessor
A BelProcessor object which contains INDRA Statements in bp.statements.
Notes
-----
This function calls process_belrdf to the returned RDF string from the
webservice.<|endoftext|> |
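A hypothetical call to process_ndex_neighborhood() above, using the gene list from its own docstring; it hits the NDEx web service, so it needs network access and can return None on failure:
bp = process_ndex_neighborhood(['BRAF', 'MAP2K1'],
                               rdf_out='braf_map2k1.rdf',
                               print_output=False)
if bp is not None:
    print(len(bp.statements), 'INDRA statements extracted')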
356568ef6b10a892ddf7695b59ae7a1bdabaf223995934179a1fa1188b6aa22b | def process_belrdf(rdf_str, print_output=True):
'Return a BelProcessor for a BEL/RDF string.\n\n Parameters\n ----------\n rdf_str : str\n A BEL/RDF string to be processed. This will usually come from reading\n a .rdf file.\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls all the specific get_type_of_mechanism()\n functions of the newly constructed BelProcessor to extract\n INDRA Statements.\n '
g = rdflib.Graph()
try:
g.parse(data=rdf_str, format='nt')
except ParseError:
logger.error('Could not parse rdf.')
return None
bp = BelProcessor(g)
bp.get_complexes()
bp.get_activating_subs()
bp.get_modifications()
bp.get_activating_mods()
bp.get_composite_activating_mods()
bp.get_transcription()
bp.get_activation()
if print_output:
bp.print_statement_coverage()
bp.print_statements()
return bp | Return a BelProcessor for a BEL/RDF string.
Parameters
----------
rdf_str : str
A BEL/RDF string to be processed. This will usually come from reading
a .rdf file.
Returns
-------
bp : BelProcessor
A BelProcessor object which contains INDRA Statements in bp.statements.
Notes
-----
This function calls all the specific get_type_of_mechanism()
functions of the newly constructed BelProcessor to extract
INDRA Statements. | indra/bel/bel_api.py | process_belrdf | jmuhlich/indra | 0 | python | def process_belrdf(rdf_str, print_output=True):
'Return a BelProcessor for a BEL/RDF string.\n\n Parameters\n ----------\n rdf_str : str\n A BEL/RDF string to be processed. This will usually come from reading\n a .rdf file.\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls all the specific get_type_of_mechanism()\n functions of the newly constructed BelProcessor to extract\n INDRA Statements.\n '
g = rdflib.Graph()
try:
g.parse(data=rdf_str, format='nt')
except ParseError:
logger.error('Could not parse rdf.')
return None
bp = BelProcessor(g)
bp.get_complexes()
bp.get_activating_subs()
bp.get_modifications()
bp.get_activating_mods()
bp.get_composite_activating_mods()
bp.get_transcription()
bp.get_activation()
if print_output:
bp.print_statement_coverage()
bp.print_statements()
return bp | def process_belrdf(rdf_str, print_output=True):
'Return a BelProcessor for a BEL/RDF string.\n\n Parameters\n ----------\n rdf_str : str\n A BEL/RDF string to be processed. This will usually come from reading\n a .rdf file.\n\n Returns\n -------\n bp : BelProcessor\n A BelProcessor object which contains INDRA Statements in bp.statements.\n\n Notes\n -----\n This function calls all the specific get_type_of_mechanism()\n functions of the newly constructed BelProcessor to extract\n INDRA Statements.\n '
g = rdflib.Graph()
try:
g.parse(data=rdf_str, format='nt')
except ParseError:
logger.error('Could not parse rdf.')
return None
bp = BelProcessor(g)
bp.get_complexes()
bp.get_activating_subs()
bp.get_modifications()
bp.get_activating_mods()
bp.get_composite_activating_mods()
bp.get_transcription()
bp.get_activation()
if print_output:
bp.print_statement_coverage()
bp.print_statements()
return bp<|docstring|>Return a BelProcessor for a BEL/RDF string.
Parameters
----------
rdf_str : str
A BEL/RDF string to be processed. This will usually come from reading
a .rdf file.
Returns
-------
bp : BelProcessor
A BelProcessor object which contains INDRA Statements in bp.statements.
Notes
-----
This function calls all the specific get_type_of_mechanism()
functions of the newly constructed BelProcessor to extract
INDRA Statements.<|endoftext|> |
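An offline counterpart: re-running extraction from an RDF file saved by an earlier neighborhood query (the file name matches the default rdf_out above, but its existence is an assumption):
with open('bel_output.rdf', 'rb') as fh:
    rdf_str = fh.read().decode('utf-8')
bp = process_belrdf(rdf_str, print_output=False)
if bp is not None:
    print(len(bp.statements), 'INDRA statements extracted')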
03f3acde8005df8b54da6018bdd9da75268b633125652d5396c4286c3039e810 | def setup_conf():
'Setup the cfg for the clean up utility.\n\n Use separate setup_conf for the utility because there are many options\n from the main config that do not apply during clean-up.\n '
conf = cfg.CONF
cmd.register_cmd_opts(cmd.ovs_opts, conf)
l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_interface_opts()
conf.set_default('ovsdb_timeout', CLEANUP_OVSDB_TIMEOUT, 'OVS')
return conf | Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up. | neutron/cmd/ovs_cleanup.py | setup_conf | mmidolesov2/neutron-1 | 1 | python | def setup_conf():
'Setup the cfg for the clean up utility.\n\n Use separate setup_conf for the utility because there are many options\n from the main config that do not apply during clean-up.\n '
conf = cfg.CONF
cmd.register_cmd_opts(cmd.ovs_opts, conf)
l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_interface_opts()
conf.set_default('ovsdb_timeout', CLEANUP_OVSDB_TIMEOUT, 'OVS')
return conf | def setup_conf():
'Setup the cfg for the clean up utility.\n\n Use separate setup_conf for the utility because there are many options\n from the main config that do not apply during clean-up.\n '
conf = cfg.CONF
cmd.register_cmd_opts(cmd.ovs_opts, conf)
l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_interface_opts()
conf.set_default('ovsdb_timeout', CLEANUP_OVSDB_TIMEOUT, 'OVS')
return conf<|docstring|>Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.<|endoftext|> |
d75f4794247b325698c5bc982be43cabe12268a67c5d09f471a699571971fa68 | def get_bridge_deletable_ports(br):
"\n Return a list of OVS Bridge ports, excluding the ports who should not be\n cleaned. such ports are tagged with the 'skip_cleanup' key in external_ids.\n "
return [port.port_name for port in br.get_vif_ports() if (constants.SKIP_CLEANUP not in br.get_port_external_ids(port.port_name))] | Return a list of OVS Bridge ports, excluding the ports that should not be
cleaned. Such ports are tagged with the 'skip_cleanup' key in external_ids. | neutron/cmd/ovs_cleanup.py | get_bridge_deletable_ports | mmidolesov2/neutron-1 | 1 | python | def get_bridge_deletable_ports(br):
"\n Return a list of OVS Bridge ports, excluding the ports who should not be\n cleaned. such ports are tagged with the 'skip_cleanup' key in external_ids.\n "
return [port.port_name for port in br.get_vif_ports() if (constants.SKIP_CLEANUP not in br.get_port_external_ids(port.port_name))] | def get_bridge_deletable_ports(br):
"\n Return a list of OVS Bridge ports, excluding the ports who should not be\n cleaned. such ports are tagged with the 'skip_cleanup' key in external_ids.\n "
return [port.port_name for port in br.get_vif_ports() if (constants.SKIP_CLEANUP not in br.get_port_external_ids(port.port_name))]<|docstring|>Return a list of OVS Bridge ports, excluding the ports that should not be
cleaned. Such ports are tagged with the 'skip_cleanup' key in external_ids.<|endoftext|>
0322a28700f4f67b534dd5b0da3f2c65a9642fb873fe3e8e5736a7e54f8a3d51 | def collect_neutron_ports(bridges):
'Collect ports created by Neutron from OVS.'
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge)
ports += get_bridge_deletable_ports(ovs)
return ports | Collect ports created by Neutron from OVS. | neutron/cmd/ovs_cleanup.py | collect_neutron_ports | mmidolesov2/neutron-1 | 1 | python | def collect_neutron_ports(bridges):
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge)
ports += get_bridge_deletable_ports(ovs)
return ports | def collect_neutron_ports(bridges):
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge)
ports += get_bridge_deletable_ports(ovs)
return ports<|docstring|>Collect ports created by Neutron from OVS.<|endoftext|> |
3bbee0d0b5810045e260a9511df10a579f6bc422c922ea5f2f9f48129ff463b9 | def delete_neutron_ports(ports):
'Delete non-internal ports created by Neutron\n\n Non-internal OVS ports need to be removed manually.\n '
for port in ports:
device = ip_lib.IPDevice(port)
if device.exists():
device.link.delete()
LOG.info('Deleting port: %s', port) | Delete non-internal ports created by Neutron
Non-internal OVS ports need to be removed manually. | neutron/cmd/ovs_cleanup.py | delete_neutron_ports | mmidolesov2/neutron-1 | 1 | python | def delete_neutron_ports(ports):
'Delete non-internal ports created by Neutron\n\n Non-internal OVS ports need to be removed manually.\n '
for port in ports:
device = ip_lib.IPDevice(port)
if device.exists():
device.link.delete()
LOG.info('Deleting port: %s', port) | def delete_neutron_ports(ports):
'Delete non-internal ports created by Neutron\n\n Non-internal OVS ports need to be removed manually.\n '
for port in ports:
device = ip_lib.IPDevice(port)
if device.exists():
device.link.delete()
LOG.info('Deleting port: %s', port)<|docstring|>Delete non-internal ports created by Neutron
Non-internal OVS ports need to be removed manually.<|endoftext|> |
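A hypothetical cleanup flow combining the two helpers above; 'br-int' and 'br-ex' are just typical bridge names, and the calls need a host with Open vSwitch and sufficient privileges:
bridges = ['br-int', 'br-ex']             # assumed bridge names
ports = collect_neutron_ports(bridges)    # Neutron-created ports, minus skip_cleanup ones
delete_neutron_ports(ports)               # removes the non-internal ports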
d48d90d072114086f30bc312b7f65bf3a4dbee657158d165e8040d455004c2ed | def main():
'Main method for cleaning up OVS bridges.\n\n The utility cleans up the integration bridges used by Neutron.\n '
conf = setup_conf()
conf()
config.setup_logging()
do_main(conf) | Main method for cleaning up OVS bridges.
The utility cleans up the integration bridges used by Neutron. | neutron/cmd/ovs_cleanup.py | main | mmidolesov2/neutron-1 | 1 | python | def main():
'Main method for cleaning up OVS bridges.\n\n The utility cleans up the integration bridges used by Neutron.\n '
conf = setup_conf()
conf()
config.setup_logging()
do_main(conf) | def main():
'Main method for cleaning up OVS bridges.\n\n The utility cleans up the integration bridges used by Neutron.\n '
conf = setup_conf()
conf()
config.setup_logging()
do_main(conf)<|docstring|>Main method for cleaning up OVS bridges.
The utility cleans up the integration bridges used by Neutron.<|endoftext|> |
a4f91b0b8e2b7f5c1897a2c52f1835c4feef7cdadc8a2aded5738d2ab5cc7acf | def sql_to_markdown(sql_query: str, showindex: bool=False):
'Run a SQL query on the netspeedlogger database and print a table of the results'
if database_has_results():
df = query(sql_query)
print(df.to_markdown(index=showindex))
else:
print('No results - run `netspeedlogger run` first') | Run a SQL query on the netspeedlogger database and print a table of the results | netspeedlogger/cli.py | sql_to_markdown | radinplaid/netspeedlogger | 0 | python | def sql_to_markdown(sql_query: str, showindex: bool=False):
if database_has_results():
df = query(sql_query)
print(df.to_markdown(index=showindex))
else:
print('No results - run `netspeedlogger run` first') | def sql_to_markdown(sql_query: str, showindex: bool=False):
if database_has_results():
df = query(sql_query)
print(df.to_markdown(index=showindex))
else:
print('No results - run `netspeedlogger run` first')<|docstring|>Run a SQL query on the netspeedlogger database and print a table of the results<|endoftext|>
48581f9342b7bf5d963eb30dfea8ba43be5bf1d0f2c89d0792c8ee9c76a6dfd4 | def results():
'Show all results from the netspeedlogger database\n\n If there are more than 10000 results, will show the first 10000\n '
sql_to_markdown("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', bytes_sent/(1024) as 'kB Sent', bytes_received/(1024) as 'kB Recieved', server_id as 'Server ID', server_host as 'Server Host', ping as 'Ping (ms)' from netspeedlogger limit 10000") | Show all results from the netspeedlogger database
If there are more than 10000 results, will show the first 10000 | netspeedlogger/cli.py | results | radinplaid/netspeedlogger | 0 | python | def results():
'Show all results from the netspeedlogger database\n\n If there are more than 10000 results, will show the first 10000\n '
sql_to_markdown("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', bytes_sent/(1024) as 'kB Sent', bytes_received/(1024) as 'kB Recieved', server_id as 'Server ID', server_host as 'Server Host', ping as 'Ping (ms)' from netspeedlogger limit 10000") | def results():
'Show all results from the netspeedlogger database\n\n If there are more than 10000 results, will show the first 10000\n '
sql_to_markdown("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', bytes_sent/(1024) as 'kB Sent', bytes_received/(1024) as 'kB Recieved', server_id as 'Server ID', server_host as 'Server Host', ping as 'Ping (ms)' from netspeedlogger limit 10000")<|docstring|>Show all results from the netspeedlogger database
If there are more than 10000 results, will show the first 10000<|endoftext|> |
edae61c729bccb319cf155d3007a1d6547f634c560e0634bcf18048a4e87fde1 | def summary():
'Display summary of internet speed test results as a table'
if database_has_results():
df = query("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', ping as 'Ping (ms)' from netspeedlogger ")
print(df.describe().to_markdown(index=True))
else:
print('No results - run `netspeedlogger run` first') | Display summary of internet speed test results as a table | netspeedlogger/cli.py | summary | radinplaid/netspeedlogger | 0 | python | def summary():
if database_has_results():
df = query("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', ping as 'Ping (ms)' from netspeedlogger ")
print(df.describe().to_markdown(index=True))
else:
print('No results - run `netspeedlogger run` first') | def summary():
if database_has_results():
df = query("select substr(timestamp,1,19) as 'Date Time', download_speed/(1024*1024) as 'Download Speed (Mb/s)', upload_speed/(1024*1024) as 'Upload Speed (Mb/s)', ping as 'Ping (ms)' from netspeedlogger ")
print(df.describe().to_markdown(index=True))
else:
print('No results - run `netspeedlogger run` first')<|docstring|>Display summary of internet speed test results as a table<|endoftext|> |
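A self-contained sketch of the describe().to_markdown() pattern used by summary() above; pandas needs the optional tabulate package for to_markdown, and the numbers are invented:
import pandas as pd
df = pd.DataFrame({'Download Speed (Mb/s)': [42.1, 38.7, 40.3],
                   'Upload Speed (Mb/s)': [11.2, 10.8, 11.0],
                   'Ping (ms)': [14.0, 15.5, 13.8]})
print(df.describe().to_markdown(index=True))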
4df2214f78d2dcbed7d5e4058eb8bb50a92542387b380acf66b83d35c6d6e77a | def speedtest():
'Run an internet speed test using speedtest-cli and save the results to a local sqlite database'
print('netspeedlogger speedtest')
print(('=' * len('netspeedlogger speedtest')))
print('Starting to run an internet speed test, and logging the output')
results_dict = run_speedtest()
df = speedtest_dict_to_dataframe(results_dict)
write_speedtest_to_database(df)
print('Speedtest complete. Results:')
print(df.to_markdown(index=False)) | Run an internet speed test using speedtest-cli and save the results to a local sqlite database | netspeedlogger/cli.py | speedtest | radinplaid/netspeedlogger | 0 | python | def speedtest():
print('netspeedlogger speedtest')
print(('=' * len('netspeedlogger speedtest')))
print('Starting to run an internet speed test, and logging the output')
results_dict = run_speedtest()
df = speedtest_dict_to_dataframe(results_dict)
write_speedtest_to_database(df)
print('Speedtest complete. Results:')
print(df.to_markdown(index=False)) | def speedtest():
print('netspeedlogger speedtest')
print(('=' * len('netspeedlogger speedtest')))
print('Starting to run an internet speed test, and logging the output')
results_dict = run_speedtest()
df = speedtest_dict_to_dataframe(results_dict)
write_speedtest_to_database(df)
print('Speedtest complete. Results:')
print(df.to_markdown(index=False))<|docstring|>Run an internet speed test using speedtest-cli and save the results to a local sqlite database<|endoftext|> |
89d836977e3548190a6215d1b92523348d642435b76837f0ae670e919ae68bda | def delete_database():
'Delete the netspeedlogger database after asking the user for confirmation'
db_path = get_database_path()
print(f'Deleting netspeedlogger database at path: `{db_path}`')
print("Are you sure you want to delete the whole database? Input 'y' for yes or 'n' for no")
for i in range(10):
confirmation = input("Please type 'y' for Yes or 'n' for No")
if (confirmation == 'n'):
return 'Not deleting database'
elif (confirmation == 'y'):
delete_database_if_exists()
return 'Database deleted' | Delete the netspeedlogger database after asking the user for confirmation | netspeedlogger/cli.py | delete_database | radinplaid/netspeedlogger | 0 | python | def delete_database():
db_path = get_database_path()
print(f'Deleting netspeedlogger database at path: `{db_path}`')
print("Are you sure you want to delete the whole database? Input 'y' for yes or 'n' for no")
for i in range(10):
confirmation = input("Please type 'y' for Yes or 'n' for No")
if (confirmation == 'n'):
return 'Not deleting database'
elif (confirmation == 'y'):
delete_database_if_exists()
return 'Database deleted' | def delete_database():
db_path = get_database_path()
print(f'Deleting netspeedlogger database at path: `{db_path}`')
print("Are you sure you want to delete the whole database? Input 'y' for yes or 'n' for no")
for i in range(10):
confirmation = input("Please type 'y' for Yes or 'n' for No")
if (confirmation == 'n'):
return 'Not deleting database'
elif (confirmation == 'y'):
delete_database_if_exists()
return 'Database deleted'<|docstring|>Delete the netspeedlogger database after asking the user for confirmation<|endoftext|>
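The confirmation loop in delete_database() above could be factored into a small generic helper; the sketch below is not part of netspeedlogger, just an illustration of the pattern:
def confirm(prompt, attempts=10):
    # Ask until the user answers 'y' or 'n'; give up (and refuse) after `attempts` tries.
    for _ in range(attempts):
        answer = input(prompt + " [y/n] ").strip().lower()
        if answer in ('y', 'n'):
            return answer == 'y'
    return False
if confirm('Delete the whole netspeedlogger database?'):
    print('deleting...')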
194f9f4f213fc22ba71a2c93a756b58b424298f3cb7601f7c6c3a3bb8cd0497f | def get_partitions(self, source):
'Process of reading partitions data/ground truth'
paths = self._get_partitions(source)
dataset = dict()
for i in self.partitions:
dataset[i] = {'dt': [], 'gt': []}
for item in paths[i]:
img = cv2.imread(os.path.join(source, item[0]), cv2.IMREAD_GRAYSCALE)
img = np.array(img[(item[2][0]:item[2][1], item[2][2]:item[2][3])], dtype=np.uint8)
dataset[i]['dt'].append(img)
dataset[i]['gt'].append(item[1])
return dataset | Process of reading partitions data/ground truth | src/transform/rimes.py | get_partitions | keyochali/handwritten-text-recognition | 2 | python | def get_partitions(self, source):
paths = self._get_partitions(source)
dataset = dict()
for i in self.partitions:
dataset[i] = {'dt': [], 'gt': []}
for item in paths[i]:
img = cv2.imread(os.path.join(source, item[0]), cv2.IMREAD_GRAYSCALE)
img = np.array(img[(item[2][0]:item[2][1], item[2][2]:item[2][3])], dtype=np.uint8)
dataset[i]['dt'].append(img)
dataset[i]['gt'].append(item[1])
return dataset | def get_partitions(self, source):
paths = self._get_partitions(source)
dataset = dict()
for i in self.partitions:
dataset[i] = {'dt': [], 'gt': []}
for item in paths[i]:
img = cv2.imread(os.path.join(source, item[0]), cv2.IMREAD_GRAYSCALE)
img = np.array(img[(item[2][0]:item[2][1], item[2][2]:item[2][3])], dtype=np.uint8)
dataset[i]['dt'].append(img)
dataset[i]['gt'].append(item[1])
return dataset<|docstring|>Process of reading partitions data/ground truth<|endoftext|>
3f60b1d39849555084fe8eb0d34c90e517a4bfa0a62db1b99c48d339faab5373 | def _get_partitions(self, source):
'Read the partitions file'
def generate(xml, subpath, partition, validation=False):
xml = ET.parse(os.path.join(source, xml)).getroot()
dt = []
for page_tag in xml:
page_path = page_tag.attrib['FileName']
for (i, line_tag) in enumerate(page_tag.iter('Line')):
text = html.unescape(line_tag.attrib['Value'])
text = ' '.join(text.split())
if (len(text) > 3):
bound = [abs(int(line_tag.attrib['Top'])), abs(int(line_tag.attrib['Bottom'])), abs(int(line_tag.attrib['Left'])), abs(int(line_tag.attrib['Right']))]
dt.append([os.path.join(subpath, page_path), text, bound])
if validation:
index = int((len(dt) * 0.9))
partition['valid'] = dt[index:]
partition['train'] = dt[:index]
else:
partition['test'] = dt
partition = dict()
generate('training_2011.xml', 'training_2011', partition, validation=True)
generate('eval_2011_annotated.xml', 'eval_2011', partition, validation=False)
return partition | Read the partitions file | src/transform/rimes.py | _get_partitions | keyochali/handwritten-text-recognition | 2 | python | def _get_partitions(self, source):
def generate(xml, subpath, partition, validation=False):
xml = ET.parse(os.path.join(source, xml)).getroot()
dt = []
for page_tag in xml:
page_path = page_tag.attrib['FileName']
for (i, line_tag) in enumerate(page_tag.iter('Line')):
text = html.unescape(line_tag.attrib['Value'])
text = ' '.join(text.split())
if (len(text) > 3):
bound = [abs(int(line_tag.attrib['Top'])), abs(int(line_tag.attrib['Bottom'])), abs(int(line_tag.attrib['Left'])), abs(int(line_tag.attrib['Right']))]
dt.append([os.path.join(subpath, page_path), text, bound])
if validation:
index = int((len(dt) * 0.9))
partition['valid'] = dt[index:]
partition['train'] = dt[:index]
else:
partition['test'] = dt
partition = dict()
generate('training_2011.xml', 'training_2011', partition, validation=True)
generate('eval_2011_annotated.xml', 'eval_2011', partition, validation=False)
return partition | def _get_partitions(self, source):
def generate(xml, subpath, partition, validation=False):
xml = ET.parse(os.path.join(source, xml)).getroot()
dt = []
for page_tag in xml:
page_path = page_tag.attrib['FileName']
for (i, line_tag) in enumerate(page_tag.iter('Line')):
text = html.unescape(line_tag.attrib['Value'])
text = ' '.join(text.split())
if (len(text) > 3):
bound = [abs(int(line_tag.attrib['Top'])), abs(int(line_tag.attrib['Bottom'])), abs(int(line_tag.attrib['Left'])), abs(int(line_tag.attrib['Right']))]
dt.append([os.path.join(subpath, page_path), text, bound])
if validation:
index = int((len(dt) * 0.9))
partition['valid'] = dt[index:]
partition['train'] = dt[:index]
else:
partition['test'] = dt
partition = dict()
generate('training_2011.xml', 'training_2011', partition, validation=True)
generate('eval_2011_annotated.xml', 'eval_2011', partition, validation=False)
return partition<|docstring|>Read the partitions file<|endoftext|> |
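A tiny self-contained illustration of the ElementTree traversal used by _get_partitions() above; the XML snippet mimics the RIMES layout (the attribute names come from the function, the values are made up):
import xml.etree.ElementTree as ET
sample = ('<Pages>'
          '<SinglePage FileName="page_01.png">'
          '<Line Value="Bonjour le monde" Top="10" Bottom="42" Left="5" Right="220"/>'
          '</SinglePage>'
          '</Pages>')
root = ET.fromstring(sample)
for page in root:
    for line in page.iter('Line'):
        print(page.attrib['FileName'], line.attrib['Value'],
              [line.attrib[k] for k in ('Top', 'Bottom', 'Left', 'Right')])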
16eec2c4e118a67dda121b8f30f28c0df080b0a85fbaad06ac75d866b7bdccb9 | def get_app_sec_failover_hostnames(config_id: Optional[int]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetAppSecFailoverHostnamesResult:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
__args__ = dict()
__args__['configId'] = config_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('akamai:index/getAppSecFailoverHostnames:getAppSecFailoverHostnames', __args__, opts=opts, typ=GetAppSecFailoverHostnamesResult).value
return AwaitableGetAppSecFailoverHostnamesResult(config_id=__ret__.config_id, hostnames=__ret__.hostnames, id=__ret__.id, json=__ret__.json, output_text=__ret__.output_text) | **Scopes**: Security configuration
Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)
pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)
pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)
pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `hostnames`. List of the failover hostnames.
- `json`. JSON-formatted list of the failover hostnames.
:param int config_id: . Unique identifier of the security configuration associated with the failover hosts. | sdk/python/pulumi_akamai/get_app_sec_failover_hostnames.py | get_app_sec_failover_hostnames | pulumi/pulumi-akamai | 3 | python | def get_app_sec_failover_hostnames(config_id: Optional[int]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetAppSecFailoverHostnamesResult:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
__args__ = dict()
__args__['configId'] = config_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('akamai:index/getAppSecFailoverHostnames:getAppSecFailoverHostnames', __args__, opts=opts, typ=GetAppSecFailoverHostnamesResult).value
return AwaitableGetAppSecFailoverHostnamesResult(config_id=__ret__.config_id, hostnames=__ret__.hostnames, id=__ret__.id, json=__ret__.json, output_text=__ret__.output_text) | def get_app_sec_failover_hostnames(config_id: Optional[int]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetAppSecFailoverHostnamesResult:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
__args__ = dict()
__args__['configId'] = config_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('akamai:index/getAppSecFailoverHostnames:getAppSecFailoverHostnames', __args__, opts=opts, typ=GetAppSecFailoverHostnamesResult).value
return AwaitableGetAppSecFailoverHostnamesResult(config_id=__ret__.config_id, hostnames=__ret__.hostnames, id=__ret__.id, json=__ret__.json, output_text=__ret__.output_text)<|docstring|>**Scopes**: Security configuration
Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)
pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)
pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)
pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `hostnames`. List of the failover hostnames.
- `json`. JSON-formatted list of the failover hostnames.
:param int config_id: . Unique identifier of the security configuration associated with the failover hosts.<|endoftext|> |
0a41fe5df1928f72cd4445f5ffb7ca84937eca0fb9314a2b5e4079cda68ec434 | @_utilities.lift_output_func(get_app_sec_failover_hostnames)
def get_app_sec_failover_hostnames_output(config_id: Optional[pulumi.Input[int]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetAppSecFailoverHostnamesResult]:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
... | **Scopes**: Security configuration
Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)
pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)
pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)
pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `hostnames`. List of the failover hostnames.
- `json`. JSON-formatted list of the failover hostnames.
:param int config_id: . Unique identifier of the security configuration associated with the failover hosts. | sdk/python/pulumi_akamai/get_app_sec_failover_hostnames.py | get_app_sec_failover_hostnames_output | pulumi/pulumi-akamai | 3 | python | @_utilities.lift_output_func(get_app_sec_failover_hostnames)
def get_app_sec_failover_hostnames_output(config_id: Optional[pulumi.Input[int]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetAppSecFailoverHostnamesResult]:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
... | @_utilities.lift_output_func(get_app_sec_failover_hostnames)
def get_app_sec_failover_hostnames_output(config_id: Optional[pulumi.Input[int]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetAppSecFailoverHostnamesResult]:
'\n **Scopes**: Security configuration\n\n Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.\n\n **Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)\n\n ## Example Usage\n\n Basic usage:\n\n ```python\n import pulumi\n import pulumi_akamai as akamai\n\n configuration = akamai.get_app_sec_configuration(name="Documentation")\n failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)\n pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)\n pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)\n pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)\n ```\n ## Output Options\n\n The following options can be used to determine the information returned, and how that returned information is formatted:\n\n - `hostnames`. List of the failover hostnames.\n - `json`. JSON-formatted list of the failover hostnames.\n\n\n :param int config_id: . Unique identifier of the security configuration associated with the failover hosts.\n '
...<|docstring|>**Scopes**: Security configuration
Returns a list of the failover hostnames in a configuration. The returned information is described in the [List failover hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames) section of the Application Security API.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/failover-hostnames](https://developer.akamai.com/api/cloud_security/application_security/v1.html#getfailoverhostnames)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
failover_hostnames_app_sec_failover_hostnames = akamai.get_app_sec_failover_hostnames(config_id=configuration.config_id)
pulumi.export("failoverHostnames", failover_hostnames_app_sec_failover_hostnames.hostnames)
pulumi.export("failoverHostnamesOutput", failover_hostnames_app_sec_failover_hostnames.output_text)
pulumi.export("failoverHostnamesJson", failover_hostnames_app_sec_failover_hostnames.json)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `hostnames`. List of the failover hostnames.
- `json`. JSON-formatted list of the failover hostnames.
:param int config_id: . Unique identifier of the security configuration associated with the failover hosts.<|endoftext|> |
bcf5b51a327014088b63f706e1dc3987198031e1f0241bd10b06cf4dd5bcb53c | @property
@pulumi.getter
def id(self) -> str:
'\n The provider-assigned unique ID for this managed resource.\n '
return pulumi.get(self, 'id') | The provider-assigned unique ID for this managed resource. | sdk/python/pulumi_akamai/get_app_sec_failover_hostnames.py | id | pulumi/pulumi-akamai | 3 | python | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id') | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')<|docstring|>The provider-assigned unique ID for this managed resource.<|endoftext|> |
99572c002fce0d5ba4f68c8a5eb5890985f89922939bdd4a21d0c5a5bde50671 | def setUp(self):
'Sets up the needed objects used throughout the test.'
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['fvdetest.qcow2'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_OS, location=test_path)
test_qcow_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_QCOW, parent=test_os_path_spec)
self._gpt_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_GPT, location='/p1', parent=test_qcow_path_spec)
self._cs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec) | Sets up the needed objects used throughout the test. | tests/vfs/cs_file_system.py | setUp | jaegeral/dfvfs | 0 | python | def setUp(self):
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['fvdetest.qcow2'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_OS, location=test_path)
test_qcow_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_QCOW, parent=test_os_path_spec)
self._gpt_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_GPT, location='/p1', parent=test_qcow_path_spec)
self._cs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec) | def setUp(self):
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['fvdetest.qcow2'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_OS, location=test_path)
test_qcow_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_QCOW, parent=test_os_path_spec)
self._gpt_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_GPT, location='/p1', parent=test_qcow_path_spec)
self._cs_path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec)<|docstring|>Sets up the needed objects used throughout the test.<|endoftext|> |
dd029afa706cbbe66b58905cf013e0290da5ca47bd22950a743f7117c7cedf25 | def tearDown(self):
'Cleans up the needed objects used throughout the test.'
self._resolver_context.Empty() | Cleans up the needed objects used throughout the test. | tests/vfs/cs_file_system.py | tearDown | jaegeral/dfvfs | 0 | python | def tearDown(self):
self._resolver_context.Empty() | def tearDown(self):
self._resolver_context.Empty()<|docstring|>Cleans up the needed objects used throughout the test.<|endoftext|> |
e311679ebf1fadbee5b334f89e54b53be7228e5436bdfd770d5f2b0e44a26218 | def testOpenAndClose(self):
'Test the open and close functionality.'
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open() | Test the open and close functionality. | tests/vfs/cs_file_system.py | testOpenAndClose | jaegeral/dfvfs | 0 | python | def testOpenAndClose(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open() | def testOpenAndClose(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()<|docstring|>Test the open and close functionality.<|endoftext|> |
cef935b3d35bd7f3b693e9976023f5b9ce389e1b17ea31959b354471d86f9162 | def testFileEntryExistsByPathSpec(self):
'Test the file entry exists by path specification functionality.'
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec)) | Test the file entry exists by path specification functionality. | tests/vfs/cs_file_system.py | testFileEntryExistsByPathSpec | jaegeral/dfvfs | 0 | python | def testFileEntryExistsByPathSpec(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec)) | def testFileEntryExistsByPathSpec(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
self.assertTrue(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
self.assertFalse(file_system.FileEntryExistsByPathSpec(path_spec))<|docstring|>Test the file entry exists by path specification functionality.<|endoftext|> |
04412eeb821aa5475e8e0d85edf0856562520aac4aceae2faf8724fa60cc1aeb | def testGetFileEntryByPathSpec(self):
'Tests the GetFileEntryByPathSpec function.'
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry) | Tests the GetFileEntryByPathSpec function. | tests/vfs/cs_file_system.py | testGetFileEntryByPathSpec | jaegeral/dfvfs | 0 | python | def testGetFileEntryByPathSpec(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, )
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry) | def testGetFileEntryByPathSpec(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, )
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=0)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs1', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'cs1')
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec, volume_index=9)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs0', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)
path_spec = path_spec_factory.Factory.NewPathSpec(definitions.TYPE_INDICATOR_CS, location='/cs9', parent=self._gpt_path_spec)
file_entry = file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNone(file_entry)<|docstring|>Tests the GetFileEntryByPathSpec function.<|endoftext|> |
047a7c44d6ab0e479cb8d1a44de9660ec0f288f579a4313341a383845fd3e575 | def testGetRootFileEntry(self):
'Test the get root file entry functionality.'
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '') | Test the get root file entry functionality. | tests/vfs/cs_file_system.py | testGetRootFileEntry | jaegeral/dfvfs | 0 | python | def testGetRootFileEntry(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, ) | def testGetRootFileEntry(self):
file_system = cs_file_system.CSFileSystem(self._resolver_context, self._cs_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, )<|docstring|>Test the get root file entry functionality.<|endoftext|> |
732588fec966c5c3dd4f948a56b3a1194703f4e170521ce6c7ac6155fd021cc5 | def f(x):
'Identity'
return x | Identity | python/pyspark/sql/tests/test_udf.py | f | zoujhub/spark | 35,083 | python | def f(x):
return x | def f(x):
return x<|docstring|>Identity<|endoftext|> |
2a9282ce919ffbb7dbf50b63cc526f17a73a961918e1b80d1fb04d08114f7e51 | def download_weightscomp(ascii='ascii2', isotype='some'):
"\n Downloader function for the NIST Atomic Weights and Isotopic Compositions database\n\n Makes a GET request to download data; then extracts preformatted text\n\n Parameters\n ----------\n ascii: str\n GET request parameter, refer to the NIST docs\n (default: 'ascii')\n isotype: str\n GET request parameter, refer to the NIST docs\n (default: 'some')\n\n Returns\n -------\n str\n Preformatted text data\n\n "
logger.info('Downloading data from the NIST Atomic Weights and Isotopic Compositions Database.')
r = requests.get(WEIGHTSCOMP_URL, params={'ascii': ascii, 'isotype': isotype})
soup = BeautifulSoup(r.text, 'html5lib')
pre_text_data = soup.pre.get_text()
pre_text_data = pre_text_data.replace(u'\xa0', u' ')
return pre_text_data | Downloader function for the NIST Atomic Weights and Isotopic Compositions database
Makes a GET request to download data; then extracts preformatted text
Parameters
----------
ascii: str
GET request parameter, refer to the NIST docs
(default: 'ascii')
isotype: str
GET request parameter, refer to the NIST docs
(default: 'some')
Returns
-------
str
Preformatted text data | carsus/io/nist/weightscomp.py | download_weightscomp | epassaro/carsus | 21 | python | def download_weightscomp(ascii='ascii2', isotype='some'):
"\n Downloader function for the NIST Atomic Weights and Isotopic Compositions database\n\n Makes a GET request to download data; then extracts preformatted text\n\n Parameters\n ----------\n ascii: str\n GET request parameter, refer to the NIST docs\n (default: 'ascii')\n isotype: str\n GET request parameter, refer to the NIST docs\n (default: 'some')\n\n Returns\n -------\n str\n Preformatted text data\n\n "
logger.info('Downloading data from the NIST Atomic Weights and Isotopic Compositions Database.')
r = requests.get(WEIGHTSCOMP_URL, params={'ascii': ascii, 'isotype': isotype})
soup = BeautifulSoup(r.text, 'html5lib')
pre_text_data = soup.pre.get_text()
pre_text_data = pre_text_data.replace(u'\xa0', u' ')
return pre_text_data | def download_weightscomp(ascii='ascii2', isotype='some'):
"\n Downloader function for the NIST Atomic Weights and Isotopic Compositions database\n\n Makes a GET request to download data; then extracts preformatted text\n\n Parameters\n ----------\n ascii: str\n GET request parameter, refer to the NIST docs\n (default: 'ascii')\n isotype: str\n GET request parameter, refer to the NIST docs\n (default: 'some')\n\n Returns\n -------\n str\n Preformatted text data\n\n "
logger.info('Downloading data from the NIST Atomic Weights and Isotopic Compositions Database.')
r = requests.get(WEIGHTSCOMP_URL, params={'ascii': ascii, 'isotype': isotype})
soup = BeautifulSoup(r.text, 'html5lib')
pre_text_data = soup.pre.get_text()
pre_text_data = pre_text_data.replace(u'\xa0', u' ')
return pre_text_data<|docstring|>Downloader function for the NIST Atomic Weights and Isotopic Compositions database
Makes a GET request to download data; then extracts preformatted text
Parameters
----------
ascii: str
GET request parameter, refer to the NIST docs
(default: 'ascii')
isotype: str
GET request parameter, refer to the NIST docs
(default: 'some')
Returns
-------
str
Preformatted text data<|endoftext|> |
6598327ad814a31f75872e4e452b2ca8da9aa56a9eb0036b8d0163e26f566380 | def prepare_atomic_dataframe(self):
' Returns a new dataframe created from `base` and containing data *only* related to atoms '
atomic = self.base[ATOM_WEIGHT_COLS].reset_index(level=MASS_NUM_COL, drop=True)
atomic = atomic[(~ atomic.index.duplicated())]
atomic = self._prepare_atomic_weights(atomic)
atomic = atomic[pd.notnull(atomic[AW_VAL_COL])]
return atomic | Returns a new dataframe created from `base` and containing data *only* related to atoms | carsus/io/nist/weightscomp.py | prepare_atomic_dataframe | epassaro/carsus | 21 | python | def prepare_atomic_dataframe(self):
' '
atomic = self.base[ATOM_WEIGHT_COLS].reset_index(level=MASS_NUM_COL, drop=True)
atomic = atomic[(~ atomic.index.duplicated())]
atomic = self._prepare_atomic_weights(atomic)
atomic = atomic[pd.notnull(atomic[AW_VAL_COL])]
return atomic | def prepare_atomic_dataframe(self):
' '
atomic = self.base[ATOM_WEIGHT_COLS].reset_index(level=MASS_NUM_COL, drop=True)
atomic = atomic[(~ atomic.index.duplicated())]
atomic = self._prepare_atomic_weights(atomic)
atomic = atomic[pd.notnull(atomic[AW_VAL_COL])]
return atomic<|docstring|>Returns a new dataframe created from `base` and containing data *only* related to atoms<|endoftext|> |
3500eb548c1fc1278ca5adfa4967b43835eb082564ee1248eb37267cf09dedd4 | def prepare_isotope_dataframe(self):
' Returns a new dataframe created from `base` and containing data *only* related to isotopes '
pass | Returns a new dataframe created from `base` and containing data *only* related to isotopes | carsus/io/nist/weightscomp.py | prepare_isotope_dataframe | epassaro/carsus | 21 | python | def prepare_isotope_dataframe(self):
' '
pass | def prepare_isotope_dataframe(self):
' '
pass<|docstring|>Returns a new dataframe created from `base` and containing data *only* related to isotopes<|endoftext|> |
b6b3423dbfbaca40105360cd59726f84fb618cf4d0c391a51815170908be1627 | def ingest(self, atomic_weights=True):
' *Only* ingests atomic weights *for now* '
if (self.parser.base is None):
self.download()
if atomic_weights:
self.ingest_atomic_weights()
self.session.flush() | *Only* ingests atomic weights *for now* | carsus/io/nist/weightscomp.py | ingest | epassaro/carsus | 21 | python | def ingest(self, atomic_weights=True):
' '
if (self.parser.base is None):
self.download()
if atomic_weights:
self.ingest_atomic_weights()
self.session.flush() | def ingest(self, atomic_weights=True):
' '
if (self.parser.base is None):
self.download()
if atomic_weights:
self.ingest_atomic_weights()
self.session.flush()<|docstring|>*Only* ingests atomic weights *for now*<|endoftext|> |
0080836b7e917f90245324cf234b2c35fefc4396ac862cd11015ee2e21f54841 | def _get_version(self):
'Returns NIST Atomic Weights and Isotopic Components\n Database version.\n '
selector = 'td'
html = requests.get(WEIGHTSCOMP_VERSION_URL).text
bs = BeautifulSoup(html, 'html5lib')
version = bs.select(selector)
version = version[0].text.split()[1]
self.version = version | Returns NIST Atomic Weights and Isotopic Components
Database version. | carsus/io/nist/weightscomp.py | _get_version | epassaro/carsus | 21 | python | def _get_version(self):
'Returns NIST Atomic Weights and Isotopic Components\n Database version.\n '
selector = 'td'
html = requests.get(WEIGHTSCOMP_VERSION_URL).text
bs = BeautifulSoup(html, 'html5lib')
version = bs.select(selector)
version = version[0].text.split()[1]
self.version = version | def _get_version(self):
'Returns NIST Atomic Weights and Isotopic Components\n Database version.\n '
selector = 'td'
html = requests.get(WEIGHTSCOMP_VERSION_URL).text
bs = BeautifulSoup(html, 'html5lib')
version = bs.select(selector)
version = version[0].text.split()[1]
self.version = version<|docstring|>Returns NIST Atomic Weights and Isotopic Components
Database version.<|endoftext|> |
f1f59642a05c51f5b700ac4ff9138cb7101bed85a297786700a3c0a243c0a4dc | def to_hdf(self, fname):
'Dump the `base` attribute into an HDF5 file\n\n Parameters\n ----------\n fname : path\n Path to the HDF5 output file\n '
with pd.HDFStore(fname, 'w') as f:
f.put('/atom_data', self.base, min_itemsize={'symbol': 2, 'name': 15}) | Dump the `base` attribute into an HDF5 file
Parameters
----------
fname : path
Path to the HDF5 output file | carsus/io/nist/weightscomp.py | to_hdf | epassaro/carsus | 21 | python | def to_hdf(self, fname):
'Dump the `base` attribute into an HDF5 file\n\n Parameters\n ----------\n fname : path\n Path to the HDF5 output file\n '
with pd.HDFStore(fname, 'w') as f:
f.put('/atom_data', self.base, min_itemsize={'symbol': 2, 'name': 15}) | def to_hdf(self, fname):
'Dump the `base` attribute into an HDF5 file\n\n Parameters\n ----------\n fname : path\n Path to the HDF5 output file\n '
with pd.HDFStore(fname, 'w') as f:
f.put('/atom_data', self.base, min_itemsize={'symbol': 2, 'name': 15})<|docstring|>Dump the `base` attribute into an HDF5 file
Parameters
----------
fname : path
Path to the HDF5 output file<|endoftext|> |
934635ecf3d36384c1af2d3e35800e0a6ba146fb3805f1e5599eb04ea0b94491 | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadFromDataset\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | DESCRIPTION
-----------
The Constructor for LoadFromDataset
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
----------- | bci_lib/Stages/LoadData/LoadData.py | __init__ | SahandSadeghpour/bci_lib | 0 | python | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadFromDataset\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadFromDataset\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test<|docstring|>DESCRIPTION
-----------
The Constructor for LoadFromDataset
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
-----------<|endoftext|> |
ab49ae644720d0dd6cacdfe62e4ea5c53fa30fb4b491f9c538525cc8c3300ed0 | def set_params(self, dataset: dict, save_in_cache: bool=True):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad From Dataset\n\n\t\tParameter\n\t\t-----------\n\t\tdataset : dict or list\n\n\t\tsave_in_cache: bool | True\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data_info': dataset, 'save_in_cache': save_in_cache}
return self._params | DESCRIPTION
-----------
Load From Dataset
Parameter
-----------
dataset : dict or list
save_in_cache: bool | True
Example
-----------
----------- | bci_lib/Stages/LoadData/LoadData.py | set_params | SahandSadeghpour/bci_lib | 0 | python | def set_params(self, dataset: dict, save_in_cache: bool=True):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad From Dataset\n\n\t\tParameter\n\t\t-----------\n\t\tdataset : dict or list\n\n\t\tsave_in_cache: bool | True\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data_info': dataset, 'save_in_cache': save_in_cache}
return self._params | def set_params(self, dataset: dict, save_in_cache: bool=True):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad From Dataset\n\n\t\tParameter\n\t\t-----------\n\t\tdataset : dict or list\n\n\t\tsave_in_cache: bool | True\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data_info': dataset, 'save_in_cache': save_in_cache}
return self._params<|docstring|>DESCRIPTION
-----------
Load From Dataset
Parameter
-----------
dataset : dict or list
save_in_cache: bool | True
Example
-----------
-----------<|endoftext|> |
c6782d79222d5a7e994d06e59171595a87369eea71ef0bea932c46caff9666b9 | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the data from datasets and save it on database\n\t\t-----------\n\t\t'
params = self.get_params()
outputs = self._outputs
data = Dataset.load(**params)
self._set_output(data, outputs[0])
self._finish() | DESCRIPTION
-----------
Import the data from datasets and save it on database
----------- | bci_lib/Stages/LoadData/LoadData.py | do_task | SahandSadeghpour/bci_lib | 0 | python | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the data from datasets and save it on database\n\t\t-----------\n\t\t'
params = self.get_params()
outputs = self._outputs
data = Dataset.load(**params)
self._set_output(data, outputs[0])
self._finish() | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the data from datasets and save it on database\n\t\t-----------\n\t\t'
params = self.get_params()
outputs = self._outputs
data = Dataset.load(**params)
self._set_output(data, outputs[0])
self._finish()<|docstring|>DESCRIPTION
-----------
Import the data from datasets and save it on database
-----------<|endoftext|> |
a93834e0c407387999bb48ce408b30bb68bda22c53e4239be1171a35e201e131 | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadRaw\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | DESCRIPTION
-----------
The Constructor for LoadRaw
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
----------- | bci_lib/Stages/LoadData/LoadData.py | __init__ | SahandSadeghpour/bci_lib | 0 | python | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadRaw\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadRaw\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test<|docstring|>DESCRIPTION
-----------
The Constructor for LoadRaw
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
-----------<|endoftext|> |
67e6ab5ebbff9898771d8c97c3bdea4e9d56d77cbe166c37a190ba793b4954d0 | def set_params(self, rawdata: mne.io.Raw):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad raw data\n\n\t\tParameter\n\t\t-----------\n\t\trawdata : Instance of mne.io.Raw\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data': rawdata}
return self._params | DESCRIPTION
-----------
Load raw data
Parameter
-----------
rawdata : Instance of mne.io.Raw
Example
-----------
----------- | bci_lib/Stages/LoadData/LoadData.py | set_params | SahandSadeghpour/bci_lib | 0 | python | def set_params(self, rawdata: mne.io.Raw):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad raw data\n\n\t\tParameter\n\t\t-----------\n\t\trawdata : Instance of mne.io.Raw\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data': rawdata}
return self._params | def set_params(self, rawdata: mne.io.Raw):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tLoad raw data\n\n\t\tParameter\n\t\t-----------\n\t\trawdata : Instance of mne.io.Raw\n\n\t\tExample\n\t\t-----------\n\n\t\t-----------\n\t\t'
self._params = {'data': rawdata}
return self._params<|docstring|>DESCRIPTION
-----------
Load raw data
Parameter
-----------
rawdata : Instance of mne.io.Raw
Example
-----------
-----------<|endoftext|> |
66490ace81b37022ffbe184f59c70fae9251f3a0615934fc782be91b65a1176e | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the raw data from user and save it on database\n\t\t-----------\n\t\t'
raw = self._params.pop('data')
output = RawData(self._outputs[0], raw)
self._set_output(output, self._outputs[0]) | DESCRIPTION
-----------
Import the raw data from user and save it on database
----------- | bci_lib/Stages/LoadData/LoadData.py | do_task | SahandSadeghpour/bci_lib | 0 | python | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the raw data from user and save it on database\n\t\t-----------\n\t\t'
raw = self._params.pop('data')
output = RawData(self._outputs[0], raw)
self._set_output(output, self._outputs[0]) | def do_task(self):
'\n\t\tDESCRIPTION\n\t\t-----------\n\t\tImport the raw data from user and save it on database\n\t\t-----------\n\t\t'
raw = self._params.pop('data')
output = RawData(self._outputs[0], raw)
self._set_output(output, self._outputs[0])<|docstring|>DESCRIPTION
-----------
Import the raw data from user and save it on database
-----------<|endoftext|> |
4cd467ebba479fb54b0a35d03e7a0a7b13583fac8ef63d7d4ab9613b3e207394 | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadEpochs\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | DESCRIPTION
-----------
The Constructor for LoadEpochs
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
----------- | bci_lib/Stages/LoadData/LoadData.py | __init__ | SahandSadeghpour/bci_lib | 0 | python | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadEpochs\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test | def __init__(self, id: ID, database: Database, outputs: Tuple[ID], mark_as_test: bool=False):
"\n\t\tDESCRIPTION\n\t\t-----------\n\t\tThe Constructor for LoadEpochs\n\n\t\tParameters\n\t\t-----------\n\t\tid : bci_lib.ID\n\n\t\t\tid of the stage\n\n\t\tdatabase : bci_lib.Database\n\n\t\t\tThe dictionary which we held all our data in and it's accessible from all stages\n\n\t\tinputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of input datas\n\n\t\toutputs : Tuple[ID, ...]\n\n\t\t\tIt's the tuple of some ids(bci_lib.ID) of output datas\n\n\t\tmark_as_test : bool | false\n\n\t\t\tIt can determine whether the data labeled as train(false) or test(true)\n\n\t\t-----------\n\t\t"
super().__init__(id, database, (), outputs)
self.mark_as_test = mark_as_test<|docstring|>DESCRIPTION
-----------
The Constructor for LoadEpochs
Parameters
-----------
id : bci_lib.ID
id of the stage
database : bci_lib.Database
The dictionary which we held all our data in and it's accessible from all stages
inputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of input datas
outputs : Tuple[ID, ...]
It's the tuple of some ids(bci_lib.ID) of output datas
mark_as_test : bool | false
It can determine whether the data labeled as train(false) or test(true)
-----------<|endoftext|> |