| Column | Type | Length / value range |
|---|---|---|
| body_hash | string | 64 (fixed) |
| body | string | 23 – 109k |
| docstring | string | 1 – 57k |
| path | string | 4 – 198 |
| name | string | 1 – 115 |
| repository_name | string | 7 – 111 |
| repository_stars | float64 | 0 – 191k |
| lang | string | 1 class ("python") |
| body_without_docstring | string | 14 – 108k |
| unified | string | 45 – 133k |

body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
cec14cc18872bf4e2e42db166b83bd13559ffb24e6468025a1b93f149b3a3d18 | def forward(self, pos):
'\n @brief API \n @param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them \n '
if (self.initial_density_map is None):
if pos.is_cuda:
func = density_map_cuda.fixed_density_map
else:
func = density_map_cpp.fixed_density_map
self.initial_density_map = func(pos, self.node_size_x, self.node_size_y, self.bin_center_x, self.bin_center_y, self.xl, self.yl, self.xh, self.yh, self.bin_size_x, self.bin_size_y, self.num_movable_nodes, self.num_terminals)
density_map = DensityMapFunction.forward(pos=pos, node_size_x=self.node_size_x, node_size_y=self.node_size_y, bin_center_x=self.bin_center_x, bin_center_y=self.bin_center_y, initial_density_map=self.initial_density_map, xl=self.xl, yl=self.yl, xh=self.xh, yh=self.yh, bin_size_x=self.bin_size_x, bin_size_y=self.bin_size_y, num_movable_nodes=self.num_movable_nodes, num_filler_nodes=self.num_filler_nodes)
return density_map | @brief API
@param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them | dreamplace/ops/density_map/density_map.py | forward | ArEsKay3/DREAMPlace | 323 | python | def forward(self, pos):
'\n @brief API \n @param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them \n '
if (self.initial_density_map is None):
if pos.is_cuda:
func = density_map_cuda.fixed_density_map
else:
func = density_map_cpp.fixed_density_map
self.initial_density_map = func(pos, self.node_size_x, self.node_size_y, self.bin_center_x, self.bin_center_y, self.xl, self.yl, self.xh, self.yh, self.bin_size_x, self.bin_size_y, self.num_movable_nodes, self.num_terminals)
density_map = DensityMapFunction.forward(pos=pos, node_size_x=self.node_size_x, node_size_y=self.node_size_y, bin_center_x=self.bin_center_x, bin_center_y=self.bin_center_y, initial_density_map=self.initial_density_map, xl=self.xl, yl=self.yl, xh=self.xh, yh=self.yh, bin_size_x=self.bin_size_x, bin_size_y=self.bin_size_y, num_movable_nodes=self.num_movable_nodes, num_filler_nodes=self.num_filler_nodes)
return density_map | def forward(self, pos):
'\n @brief API \n @param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them \n '
if (self.initial_density_map is None):
if pos.is_cuda:
func = density_map_cuda.fixed_density_map
else:
func = density_map_cpp.fixed_density_map
self.initial_density_map = func(pos, self.node_size_x, self.node_size_y, self.bin_center_x, self.bin_center_y, self.xl, self.yl, self.xh, self.yh, self.bin_size_x, self.bin_size_y, self.num_movable_nodes, self.num_terminals)
density_map = DensityMapFunction.forward(pos=pos, node_size_x=self.node_size_x, node_size_y=self.node_size_y, bin_center_x=self.bin_center_x, bin_center_y=self.bin_center_y, initial_density_map=self.initial_density_map, xl=self.xl, yl=self.yl, xh=self.xh, yh=self.yh, bin_size_x=self.bin_size_x, bin_size_y=self.bin_size_y, num_movable_nodes=self.num_movable_nodes, num_filler_nodes=self.num_filler_nodes)
return density_map<|docstring|>@brief API
@param pos cell locations. The array consists of x locations of movable cells, fixed cells, and filler cells, then y locations of them<|endoftext|> |
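Judging from the row above (and those that follow), the `unified` column is simply the `body_without_docstring` text followed by the docstring wrapped in `<|docstring|>` and `<|endoftext|>` sentinel tokens. A minimal sketch of that assembly, using a hypothetical helper name since the actual generation script is not part of this preview:

```python
def build_unified(body_without_docstring: str, docstring: str) -> str:
    # Concatenate the stripped function body with its natural-language docstring,
    # delimited by the sentinel tokens visible in the `unified` column.
    return f"{body_without_docstring}<|docstring|>{docstring}<|endoftext|>"
```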
1d6f324292dd85c2e7f90e786f6cf2c1429a09b7072e8c3a28872da0e499fd0f | def get_template_names(self):
'\n Dispatch template according to the kind of request: ajax or normal.\n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return [self.template_name] | Dispatch template according to the kind of request: ajax or normal. | umap/views.py | get_template_names | xiongjiabin/umap | 0 | python | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return [self.template_name] | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return [self.template_name]<|docstring|>Dispatch template according to the kind of request: ajax or normal.<|endoftext|> |
67d6431f8e51682ef25fe9fcfd7035ed748026a59175a944fde3f953d87ceeb0 | def get_template_names(self):
'\n Dispatch template according to the kind of request: ajax or normal.\n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(CompanyMaps, self).get_template_names() | Dispatch template according to the kind of request: ajax or normal. | umap/views.py | get_template_names | xiongjiabin/umap | 0 | python | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(CompanyMaps, self).get_template_names() | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(CompanyMaps, self).get_template_names()<|docstring|>Dispatch template according to the kind of request: ajax or normal.<|endoftext|> |
44b0a93750f28975a3c80944345b45f42277fae32755879035e1e3593a704699 | def get_template_names(self):
'\n Dispatch template according to the kind of request: ajax or normal.\n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(UserMaps, self).get_template_names() | Dispatch template according to the kind of request: ajax or normal. | umap/views.py | get_template_names | xiongjiabin/umap | 0 | python | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(UserMaps, self).get_template_names() | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(UserMaps, self).get_template_names()<|docstring|>Dispatch template according to the kind of request: ajax or normal.<|endoftext|> |
44a42ec37eb6485d1a8da002d59f339f51d9c17daa65999d33834fa4fc7cdc7f | def get_template_names(self):
'\n Dispatch template according to the kind of request: ajax or normal.\n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(Search, self).get_template_names() | Dispatch template according to the kind of request: ajax or normal. | umap/views.py | get_template_names | xiongjiabin/umap | 0 | python | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(Search, self).get_template_names() | def get_template_names(self):
'\n \n '
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(Search, self).get_template_names()<|docstring|>Dispatch template according to the kind of request: ajax or normal.<|endoftext|> |
501dffd4e531ddebc6bcca503783b0e46a87b01bffdbd376710a23e68e3a646e | def categorical(x, mu=255, normalize=True):
'\n Mu-law compress a block of audio samples, and convert them into a\n categorical distribution\n '
if normalize:
mx = x.max()
x = np.divide(x, mx, where=(mx != 0))
x = mu_law(x)
x = ((x - x.min()) * 0.5)
x = (x * mu).astype(np.uint8)
c = np.zeros((np.product(x.shape), (mu + 1)), dtype=np.uint8)
c[(np.arange(len(c)), x.flatten())] = 1
return ArrayWithUnits(c.reshape((x.shape + ((mu + 1),))), (x.dimensions + (IdentityDimension(),))) | Mu-law compress a block of audio samples, and convert them into a
categorical distribution | zounds/timeseries/functional.py | categorical | FelixAbrahamsson/zounds | 20 | python | def categorical(x, mu=255, normalize=True):
'\n Mu-law compress a block of audio samples, and convert them into a\n categorical distribution\n '
if normalize:
mx = x.max()
x = np.divide(x, mx, where=(mx != 0))
x = mu_law(x)
x = ((x - x.min()) * 0.5)
x = (x * mu).astype(np.uint8)
c = np.zeros((np.product(x.shape), (mu + 1)), dtype=np.uint8)
c[(np.arange(len(c)), x.flatten())] = 1
return ArrayWithUnits(c.reshape((x.shape + ((mu + 1),))), (x.dimensions + (IdentityDimension(),))) | def categorical(x, mu=255, normalize=True):
'\n Mu-law compress a block of audio samples, and convert them into a\n categorical distribution\n '
if normalize:
mx = x.max()
x = np.divide(x, mx, where=(mx != 0))
x = mu_law(x)
x = ((x - x.min()) * 0.5)
x = (x * mu).astype(np.uint8)
c = np.zeros((np.product(x.shape), (mu + 1)), dtype=np.uint8)
c[(np.arange(len(c)), x.flatten())] = 1
return ArrayWithUnits(c.reshape((x.shape + ((mu + 1),))), (x.dimensions + (IdentityDimension(),)))<|docstring|>Mu-law compress a block of audio samples, and convert them into a
categorical distribution<|endoftext|> |
f32712e41aeb400ef21c4ebb3a1fe33fbd78b05e66c9f5c254a7ea890497bd1f | def inverse_categorical(x, mu=255):
'\n Invert categorical samples\n '
flat = x.reshape(((- 1), x.shape[(- 1)]))
indices = np.argmax(flat, axis=1).astype(np.float32)
indices = ((indices / mu) - 0.5)
inverted = inverse_mu_law(indices, mu=mu).reshape(x.shape[:(- 1)])
return ArrayWithUnits(inverted, x.dimensions[:2]) | Invert categorical samples | zounds/timeseries/functional.py | inverse_categorical | FelixAbrahamsson/zounds | 20 | python | def inverse_categorical(x, mu=255):
'\n \n '
flat = x.reshape(((- 1), x.shape[(- 1)]))
indices = np.argmax(flat, axis=1).astype(np.float32)
indices = ((indices / mu) - 0.5)
inverted = inverse_mu_law(indices, mu=mu).reshape(x.shape[:(- 1)])
return ArrayWithUnits(inverted, x.dimensions[:2]) | def inverse_categorical(x, mu=255):
'\n \n '
flat = x.reshape(((- 1), x.shape[(- 1)]))
indices = np.argmax(flat, axis=1).astype(np.float32)
indices = ((indices / mu) - 0.5)
inverted = inverse_mu_law(indices, mu=mu).reshape(x.shape[:(- 1)])
return ArrayWithUnits(inverted, x.dimensions[:2])<|docstring|>Invert categorical samples<|endoftext|> |
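The two zounds samples above quantize mu-law-companded audio into 256 one-hot classes and decode it again. A stripped-down NumPy illustration of the same encode/decode idea, using plain arrays instead of `ArrayWithUnits` and assuming the standard mu-law formulas rather than importing zounds:

```python
import numpy as np

mu = 255
x = np.linspace(-1.0, 1.0, 8)  # toy block of audio samples in [-1, 1]

# mu-law companding (standard formula, assumed equivalent to zounds' mu_law)
compressed = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)

# shift/scale to class indices the way `categorical` does, then one-hot encode
idx = ((compressed - compressed.min()) * 0.5 * mu).astype(np.uint8)
onehot = np.eye(mu + 1, dtype=np.uint8)[idx]

# decode the way `inverse_categorical` does: argmax, rescale, invert the companding
recovered_idx = np.argmax(onehot, axis=-1).astype(np.float32) / mu - 0.5
recovered = np.sign(recovered_idx) * ((1 + mu) ** np.abs(recovered_idx) - 1) / mu
```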
78a024ff3cf07a1b92bce11f47c45da43d77df5e02cf1f6e8dc953fbbfecd5b4 | def _get_network(self, pod_id, pod_name, pod_namespace):
"\n Get virtual network to be associated with the pod.\n The heuristics to determine which virtual network to use for the pod\n is as follows:\n if (virtual network is annotated in the pod config):\n Use virtual network configured on the pod.\n else if (virtual network if annotated in the pod's namespace):\n Use virtual network configured on the namespace.\n else if (pod is in a isolated namespace):\n Use the virtual network associated with isolated namespace.\n else:\n Use the pod virtual network associated with kubernetes cluster.\n "
pod = PodKM.find_by_name_or_uuid(pod_id)
if (not pod):
self._logger.notice(('%s - Pod %s:%s:%s Not Found(Might Got Delete Event From K8s)' % (self._name, pod_namespace, pod_name, pod_id)))
return
vn_fq_name = pod.get_vn_fq_name()
ns = self._get_namespace(pod_namespace)
if (not vn_fq_name):
vn_fq_name = ns.get_annotated_network_fq_name()
if (not vn_fq_name):
if self._is_pod_network_isolated(pod_namespace):
vn_fq_name = ns.get_isolated_network_fq_name()
if (not vn_fq_name):
vn_fq_name = vnc_kube_config.cluster_default_network_fq_name()
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
return vn_obj | Get virtual network to be associated with the pod.
The heuristics to determine which virtual network to use for the pod
is as follows:
if (virtual network is annotated in the pod config):
Use virtual network configured on the pod.
else if (virtual network if annotated in the pod's namespace):
Use virtual network configured on the namespace.
else if (pod is in a isolated namespace):
Use the virtual network associated with isolated namespace.
else:
Use the pod virtual network associated with kubernetes cluster. | src/container/kube-manager/kube_manager/vnc/vnc_pod.py | _get_network | zhongyangni/controller | 0 | python | def _get_network(self, pod_id, pod_name, pod_namespace):
"\n Get virtual network to be associated with the pod.\n The heuristics to determine which virtual network to use for the pod\n is as follows:\n if (virtual network is annotated in the pod config):\n Use virtual network configured on the pod.\n else if (virtual network if annotated in the pod's namespace):\n Use virtual network configured on the namespace.\n else if (pod is in a isolated namespace):\n Use the virtual network associated with isolated namespace.\n else:\n Use the pod virtual network associated with kubernetes cluster.\n "
pod = PodKM.find_by_name_or_uuid(pod_id)
if (not pod):
self._logger.notice(('%s - Pod %s:%s:%s Not Found(Might Got Delete Event From K8s)' % (self._name, pod_namespace, pod_name, pod_id)))
return
vn_fq_name = pod.get_vn_fq_name()
ns = self._get_namespace(pod_namespace)
if (not vn_fq_name):
vn_fq_name = ns.get_annotated_network_fq_name()
if (not vn_fq_name):
if self._is_pod_network_isolated(pod_namespace):
vn_fq_name = ns.get_isolated_network_fq_name()
if (not vn_fq_name):
vn_fq_name = vnc_kube_config.cluster_default_network_fq_name()
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
return vn_obj | def _get_network(self, pod_id, pod_name, pod_namespace):
"\n Get virtual network to be associated with the pod.\n The heuristics to determine which virtual network to use for the pod\n is as follows:\n if (virtual network is annotated in the pod config):\n Use virtual network configured on the pod.\n else if (virtual network if annotated in the pod's namespace):\n Use virtual network configured on the namespace.\n else if (pod is in a isolated namespace):\n Use the virtual network associated with isolated namespace.\n else:\n Use the pod virtual network associated with kubernetes cluster.\n "
pod = PodKM.find_by_name_or_uuid(pod_id)
if (not pod):
self._logger.notice(('%s - Pod %s:%s:%s Not Found(Might Got Delete Event From K8s)' % (self._name, pod_namespace, pod_name, pod_id)))
return
vn_fq_name = pod.get_vn_fq_name()
ns = self._get_namespace(pod_namespace)
if (not vn_fq_name):
vn_fq_name = ns.get_annotated_network_fq_name()
if (not vn_fq_name):
if self._is_pod_network_isolated(pod_namespace):
vn_fq_name = ns.get_isolated_network_fq_name()
if (not vn_fq_name):
vn_fq_name = vnc_kube_config.cluster_default_network_fq_name()
vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
return vn_obj<|docstring|>Get virtual network to be associated with the pod.
The heuristics to determine which virtual network to use for the pod
is as follows:
if (virtual network is annotated in the pod config):
Use virtual network configured on the pod.
else if (virtual network if annotated in the pod's namespace):
Use virtual network configured on the namespace.
else if (pod is in a isolated namespace):
Use the virtual network associated with isolated namespace.
else:
Use the pod virtual network associated with kubernetes cluster.<|endoftext|> |
17d19fbf0d955b5b8fb33aad954a259471f32694aff00c2e7929876ad9a23c83 | def _create_cluster_service_fip(self, pod_name, pod_namespace, vmi_uuid):
'\n Isolated Pods in the cluster will be allocated a floating ip\n from the cluster service network, so that the pods can talk\n to cluster services.\n '
if (not self._service_fip_pool):
return
fip_pool_obj = FloatingIpPool()
fip_pool_obj.uuid = self._service_fip_pool.uuid
fip_pool_obj.fq_name = self._service_fip_pool.fq_name
fip_pool_obj.name = self._service_fip_pool.name
obj_uuid = str(uuid.uuid1())
display_name = VncCommon.make_display_name(pod_namespace, pod_name)
name = VncCommon.make_name(pod_name, obj_uuid)
fip_obj = FloatingIp(name=('cluster-svc-fip-%s' % name), parent_obj=fip_pool_obj, floating_ip_traffic_direction='egress', display_name=display_name)
fip_obj.uuid = obj_uuid
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_uuid)
fip_obj.set_virtual_machine_interface(vmi_obj)
FloatingIpKM.add_annotations(self, fip_obj, pod_namespace, pod_name)
try:
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
except RefsExistError:
fip_uuid = self._vnc_lib.floating_ip_update(fip_obj)
FloatingIpKM.locate(fip_uuid)
return | Isolated Pods in the cluster will be allocated a floating ip
from the cluster service network, so that the pods can talk
to cluster services. | src/container/kube-manager/kube_manager/vnc/vnc_pod.py | _create_cluster_service_fip | zhongyangni/controller | 0 | python | def _create_cluster_service_fip(self, pod_name, pod_namespace, vmi_uuid):
'\n Isolated Pods in the cluster will be allocated a floating ip\n from the cluster service network, so that the pods can talk\n to cluster services.\n '
if (not self._service_fip_pool):
return
fip_pool_obj = FloatingIpPool()
fip_pool_obj.uuid = self._service_fip_pool.uuid
fip_pool_obj.fq_name = self._service_fip_pool.fq_name
fip_pool_obj.name = self._service_fip_pool.name
obj_uuid = str(uuid.uuid1())
display_name = VncCommon.make_display_name(pod_namespace, pod_name)
name = VncCommon.make_name(pod_name, obj_uuid)
fip_obj = FloatingIp(name=('cluster-svc-fip-%s' % name), parent_obj=fip_pool_obj, floating_ip_traffic_direction='egress', display_name=display_name)
fip_obj.uuid = obj_uuid
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_uuid)
fip_obj.set_virtual_machine_interface(vmi_obj)
FloatingIpKM.add_annotations(self, fip_obj, pod_namespace, pod_name)
try:
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
except RefsExistError:
fip_uuid = self._vnc_lib.floating_ip_update(fip_obj)
FloatingIpKM.locate(fip_uuid)
return | def _create_cluster_service_fip(self, pod_name, pod_namespace, vmi_uuid):
'\n Isolated Pods in the cluster will be allocated a floating ip\n from the cluster service network, so that the pods can talk\n to cluster services.\n '
if (not self._service_fip_pool):
return
fip_pool_obj = FloatingIpPool()
fip_pool_obj.uuid = self._service_fip_pool.uuid
fip_pool_obj.fq_name = self._service_fip_pool.fq_name
fip_pool_obj.name = self._service_fip_pool.name
obj_uuid = str(uuid.uuid1())
display_name = VncCommon.make_display_name(pod_namespace, pod_name)
name = VncCommon.make_name(pod_name, obj_uuid)
fip_obj = FloatingIp(name=('cluster-svc-fip-%s' % name), parent_obj=fip_pool_obj, floating_ip_traffic_direction='egress', display_name=display_name)
fip_obj.uuid = obj_uuid
vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_uuid)
fip_obj.set_virtual_machine_interface(vmi_obj)
FloatingIpKM.add_annotations(self, fip_obj, pod_namespace, pod_name)
try:
fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
except RefsExistError:
fip_uuid = self._vnc_lib.floating_ip_update(fip_obj)
FloatingIpKM.locate(fip_uuid)
return<|docstring|>Isolated Pods in the cluster will be allocated a floating ip
from the cluster service network, so that the pods can talk
to cluster services.<|endoftext|> |
99f29d36f72080ecfc74a62266ac16e9e9fdf4a67215d368aa10cd4bce371068 | def __init__(self, n_input=7, n_output=6, n_h=2, size_h=128):
'\n Specify the neural network architecture\n\n :param n_input: The dimension of the input\n :param n_output: The dimension of the output\n :param n_h: The number of the hidden layer\n :param size_h: The dimension of the hidden layer\n '
super(MLP, self).__init__()
self.n_input = n_input
self.fc_in = nn.Linear(n_input, size_h)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
assert (n_h >= 1), 'h must be integer and >= 1'
self.fc_list = nn.ModuleList()
for i in range((n_h - 1)):
self.fc_list.append(nn.Linear(size_h, size_h))
self.fc_out = nn.Linear(size_h, n_output)
nn.init.uniform_(self.fc_in.weight, (- 0.1), 0.1)
nn.init.uniform_(self.fc_out.weight, (- 0.1), 0.1)
self.fc_list.apply(self.init_normal) | Specify the neural network architecture
:param n_input: The dimension of the input
:param n_output: The dimension of the output
:param n_h: The number of the hidden layer
:param size_h: The dimension of the hidden layer | MPC/MPC-Qube/dynamics.py | __init__ | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def __init__(self, n_input=7, n_output=6, n_h=2, size_h=128):
'\n Specify the neural network architecture\n\n :param n_input: The dimension of the input\n :param n_output: The dimension of the output\n :param n_h: The number of the hidden layer\n :param size_h: The dimension of the hidden layer\n '
super(MLP, self).__init__()
self.n_input = n_input
self.fc_in = nn.Linear(n_input, size_h)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
assert (n_h >= 1), 'h must be integer and >= 1'
self.fc_list = nn.ModuleList()
for i in range((n_h - 1)):
self.fc_list.append(nn.Linear(size_h, size_h))
self.fc_out = nn.Linear(size_h, n_output)
nn.init.uniform_(self.fc_in.weight, (- 0.1), 0.1)
nn.init.uniform_(self.fc_out.weight, (- 0.1), 0.1)
self.fc_list.apply(self.init_normal) | def __init__(self, n_input=7, n_output=6, n_h=2, size_h=128):
'\n Specify the neural network architecture\n\n :param n_input: The dimension of the input\n :param n_output: The dimension of the output\n :param n_h: The number of the hidden layer\n :param size_h: The dimension of the hidden layer\n '
super(MLP, self).__init__()
self.n_input = n_input
self.fc_in = nn.Linear(n_input, size_h)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
assert (n_h >= 1), 'h must be integer and >= 1'
self.fc_list = nn.ModuleList()
for i in range((n_h - 1)):
self.fc_list.append(nn.Linear(size_h, size_h))
self.fc_out = nn.Linear(size_h, n_output)
nn.init.uniform_(self.fc_in.weight, (- 0.1), 0.1)
nn.init.uniform_(self.fc_out.weight, (- 0.1), 0.1)
self.fc_list.apply(self.init_normal)<|docstring|>Specify the neural network architecture
:param n_input: The dimension of the input
:param n_output: The dimension of the output
:param n_h: The number of the hidden layer
:param size_h: The dimension of the hidden layer<|endoftext|> |
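The constructor above only declares the layers; the network's `forward` pass is not included in this preview. A plausible composition consistent with those layers, offered as an assumption rather than the repository's actual code, would be:

```python
def forward(self, x):
    # input projection followed by ReLU, matching fc_in / relu from __init__
    out = self.relu(self.fc_in(x))
    # the (n_h - 1) hidden layers stored in fc_list
    for fc in self.fc_list:
        out = self.relu(fc(out))
    # linear output head producing the n_output-dimensional prediction
    return self.fc_out(out)
```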
da49e022b21d3c6bf52fac67f441bea03e5d246169134bb5b51da91ceacb83a2 | def train(self, trainset, testset=0):
'\n Train the dynamic model with input dataset\n\n :param trainset: (Dictionary) The input training set\n :param testset: (Dictionary) The input test set\n :return:\n '
(datasets, labels) = self.norm_train_data(trainset['data'], trainset['label'])
if (testset != 0):
(test_datasets, test_labels) = self.norm_test_data(testset['data'], testset['label'])
train_dataset = MyDataset(datasets, labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
total_step = len(train_loader)
print(f'Total training step per epoch [{total_step}]')
loss_epochs = []
for epoch in range(1, (self.n_epochs + 1)):
loss_this_epoch = []
for (i, (datas, labels)) in enumerate(train_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
self.optimizer.zero_grad()
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
loss_this_epoch.append(loss.item())
loss_epochs.append(np.mean(loss_this_epoch))
if self.save_model_flag:
torch.save(self.model, self.save_model_path)
if (self.save_loss_fig and ((epoch % self.save_loss_fig_frequency) == 0)):
self.save_figure(epoch, loss_epochs, loss_this_epoch)
if (testset != 0):
loss_test = self.validate_model(test_datasets, test_labels)
print(f'Epoch [{epoch}/{self.n_epochs}], Training Loss: {np.mean(loss_this_epoch):.8f}, Test Loss: {loss_test:.8f}')
return loss_epochs | Train the dynamic model with input dataset
:param trainset: (Dictionary) The input training set
:param testset: (Dictionary) The input test set
:return: | MPC/MPC-Qube/dynamics.py | train | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def train(self, trainset, testset=0):
'\n Train the dynamic model with input dataset\n\n :param trainset: (Dictionary) The input training set\n :param testset: (Dictionary) The input test set\n :return:\n '
(datasets, labels) = self.norm_train_data(trainset['data'], trainset['label'])
if (testset != 0):
(test_datasets, test_labels) = self.norm_test_data(testset['data'], testset['label'])
train_dataset = MyDataset(datasets, labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
total_step = len(train_loader)
print(f'Total training step per epoch [{total_step}]')
loss_epochs = []
for epoch in range(1, (self.n_epochs + 1)):
loss_this_epoch = []
for (i, (datas, labels)) in enumerate(train_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
self.optimizer.zero_grad()
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
loss_this_epoch.append(loss.item())
loss_epochs.append(np.mean(loss_this_epoch))
if self.save_model_flag:
torch.save(self.model, self.save_model_path)
if (self.save_loss_fig and ((epoch % self.save_loss_fig_frequency) == 0)):
self.save_figure(epoch, loss_epochs, loss_this_epoch)
if (testset != 0):
loss_test = self.validate_model(test_datasets, test_labels)
print(f'Epoch [{epoch}/{self.n_epochs}], Training Loss: {np.mean(loss_this_epoch):.8f}, Test Loss: {loss_test:.8f}')
return loss_epochs | def train(self, trainset, testset=0):
'\n Train the dynamic model with input dataset\n\n :param trainset: (Dictionary) The input training set\n :param testset: (Dictionary) The input test set\n :return:\n '
(datasets, labels) = self.norm_train_data(trainset['data'], trainset['label'])
if (testset != 0):
(test_datasets, test_labels) = self.norm_test_data(testset['data'], testset['label'])
train_dataset = MyDataset(datasets, labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
total_step = len(train_loader)
print(f'Total training step per epoch [{total_step}]')
loss_epochs = []
for epoch in range(1, (self.n_epochs + 1)):
loss_this_epoch = []
for (i, (datas, labels)) in enumerate(train_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
self.optimizer.zero_grad()
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
loss_this_epoch.append(loss.item())
loss_epochs.append(np.mean(loss_this_epoch))
if self.save_model_flag:
torch.save(self.model, self.save_model_path)
if (self.save_loss_fig and ((epoch % self.save_loss_fig_frequency) == 0)):
self.save_figure(epoch, loss_epochs, loss_this_epoch)
if (testset != 0):
loss_test = self.validate_model(test_datasets, test_labels)
print(f'Epoch [{epoch}/{self.n_epochs}], Training Loss: {np.mean(loss_this_epoch):.8f}, Test Loss: {loss_test:.8f}')
return loss_epochs<|docstring|>Train the dynamic model with input dataset
:param trainset: (Dictionary) The input training set
:param testset: (Dictionary) The input test set
:return:<|endoftext|> |
01002227f3d6ca9458616aadbff7d637592f4e7619d0b3eff94835e84ca4db4a | def predict(self, x):
'\n Given the current state and action, predict the next state\n\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) next state numpy array\n '
x = np.array(x)
x = self.pre_process(x)
x_tensor = self.Variable(torch.FloatTensor(x).unsqueeze(0), volatile=True)
out_tensor = self.model(x_tensor)
out = out_tensor.cpu().detach().numpy()
out = self.after_process(out)
return out | Given the current state and action, predict the next state
:param x: (numpy array) current state and action in one array
:return: (numpy array) next state numpy array | MPC/MPC-Qube/dynamics.py | predict | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def predict(self, x):
'\n Given the current state and action, predict the next state\n\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) next state numpy array\n '
x = np.array(x)
x = self.pre_process(x)
x_tensor = self.Variable(torch.FloatTensor(x).unsqueeze(0), volatile=True)
out_tensor = self.model(x_tensor)
out = out_tensor.cpu().detach().numpy()
out = self.after_process(out)
return out | def predict(self, x):
'\n Given the current state and action, predict the next state\n\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) next state numpy array\n '
x = np.array(x)
x = self.pre_process(x)
x_tensor = self.Variable(torch.FloatTensor(x).unsqueeze(0), volatile=True)
out_tensor = self.model(x_tensor)
out = out_tensor.cpu().detach().numpy()
out = self.after_process(out)
return out<|docstring|>Given the current state and action, predict the next state
:param x: (numpy array) current state and action in one array
:return: (numpy array) next state numpy array<|endoftext|> |
f11b5486ae3b2e20ce90512fe32b2886325cc6869ccd0b0734f448044359eed7 | def pre_process(self, x):
'\n Pre-process the input data\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) normalized input array\n '
x = ((x - self.mean_data) / self.std_data)
return x | Pre-process the input data
:param x: (numpy array) current state and action in one array
:return: (numpy array) normalized input array | MPC/MPC-Qube/dynamics.py | pre_process | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def pre_process(self, x):
'\n Pre-process the input data\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) normalized input array\n '
x = ((x - self.mean_data) / self.std_data)
return x | def pre_process(self, x):
'\n Pre-process the input data\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) normalized input array\n '
x = ((x - self.mean_data) / self.std_data)
return x<|docstring|>Pre-process the input data
:param x: (numpy array) current state and action in one array
:return: (numpy array) normalized input array<|endoftext|> |
c386efcd65a9982c973f4154f7cd821908f413ddef52345a2a886bca27dbfa25 | def norm_train_data(self, datas, labels):
'\n Normalize the training data and record the data distribution\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
self.mean_data = np.mean(datas, axis=0)
self.mean_label = np.mean(labels, axis=0)
self.std_data = np.std(datas, axis=0)
self.std_label = np.std(labels, axis=0)
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels) | Normalize the training data and record the data distribution
:param datas: (numpy array) input data
:param labels: (numpy array) the label
:return: (numpy array) normalized data and label | MPC/MPC-Qube/dynamics.py | norm_train_data | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def norm_train_data(self, datas, labels):
'\n Normalize the training data and record the data distribution\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
self.mean_data = np.mean(datas, axis=0)
self.mean_label = np.mean(labels, axis=0)
self.std_data = np.std(datas, axis=0)
self.std_label = np.std(labels, axis=0)
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels) | def norm_train_data(self, datas, labels):
'\n Normalize the training data and record the data distribution\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
self.mean_data = np.mean(datas, axis=0)
self.mean_label = np.mean(labels, axis=0)
self.std_data = np.std(datas, axis=0)
self.std_label = np.std(labels, axis=0)
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels)<|docstring|>Normalize the training data and record the data distribution
:param datas: (numpy array) input data
:param labels: (numpy array) the label
:return: (numpy array) normalized data and label<|endoftext|> |
1d74752e75d8305591d0d09bad570da93872c84f1222837ef219aa91f1707bc9 | def norm_test_data(self, datas, labels):
'\n Normalize the test data\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels) | Normalize the test data
:param datas: (numpy array) input data
:param labels: (numpy array) the label
:return: (numpy array) normalized data and label | MPC/MPC-Qube/dynamics.py | norm_test_data | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def norm_test_data(self, datas, labels):
'\n Normalize the test data\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels) | def norm_test_data(self, datas, labels):
'\n Normalize the test data\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '
datas = ((datas - self.mean_data) / self.std_data)
labels = ((labels - self.mean_label) / self.std_label)
return (datas, labels)<|docstring|>Normalize the test data
:param datas: (numpy array) input data
:param labels: (numpy array) the label
:return: (numpy array) normalized data and label<|endoftext|> |
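`predict` above ends with an `after_process` call whose definition is not part of this preview. Given that `norm_train_data` z-scores the labels with `mean_label` and `std_label`, the inverse mapping is presumably a de-normalization along these lines (an assumption based on the normalization shown, not code from the repository):

```python
def after_process(self, x):
    # undo the z-score normalization applied to the labels during training
    return (x * self.std_label) + self.mean_label
```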
10348fb303dee14b7ac5dbd46caa9ada5a2ffd0a6ed88cb0d6f04af490bc9a6e | def validate_model(self, datasets, labels):
'\n Validate the trained model\n\n :param datasets: (numpy array) input data\n :param labels: (numpy array) corresponding label\n :return: average loss\n '
test_dataset = MyDataset(datasets, labels)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size)
loss_list = []
for (i, (datas, labels)) in enumerate(test_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss_list.append(loss.item())
loss_avr = np.average(loss_list)
return loss_avr | Validate the trained model
:param datasets: (numpy array) input data
:param labels: (numpy array) corresponding label
:return: average loss | MPC/MPC-Qube/dynamics.py | validate_model | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def validate_model(self, datasets, labels):
'\n Validate the trained model\n\n :param datasets: (numpy array) input data\n :param labels: (numpy array) corresponding label\n :return: average loss\n '
test_dataset = MyDataset(datasets, labels)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size)
loss_list = []
for (i, (datas, labels)) in enumerate(test_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss_list.append(loss.item())
loss_avr = np.average(loss_list)
return loss_avr | def validate_model(self, datasets, labels):
'\n Validate the trained model\n\n :param datasets: (numpy array) input data\n :param labels: (numpy array) corresponding label\n :return: average loss\n '
test_dataset = MyDataset(datasets, labels)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size)
loss_list = []
for (i, (datas, labels)) in enumerate(test_loader):
datas = self.Variable(torch.FloatTensor(np.float32(datas)))
labels = self.Variable(torch.FloatTensor(np.float32(labels)))
outputs = self.model(datas)
loss = self.criterion(outputs, labels)
loss_list.append(loss.item())
loss_avr = np.average(loss_list)
return loss_avr<|docstring|>Validate the trained model
:param datasets: (numpy array) input data
:param labels: (numpy array) corresponding label
:return: average loss<|endoftext|> |
1c9a6718b46ff5f20439fc06d16107a97aba9058a3f7ba065d6e47c9021e6264 | def save_figure(self, epoch, loss_epochs, loss_this_epoch):
'\n Save the loss figures\n '
plt.clf()
plt.close('all')
plt.figure(figsize=(12, 5))
plt.subplot(121)
plt.title(('Loss Trend with %s Epochs' % epoch))
plt.plot(loss_epochs)
plt.subplot(122)
plt.title('Loss Trend in the latest Epoch')
plt.plot(loss_this_epoch)
plt.savefig((('storage/loss-' + str(self.exp_number)) + '.png')) | Save the loss figures | MPC/MPC-Qube/dynamics.py | save_figure | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def save_figure(self, epoch, loss_epochs, loss_this_epoch):
'\n \n '
plt.clf()
plt.close('all')
plt.figure(figsize=(12, 5))
plt.subplot(121)
plt.title(('Loss Trend with %s Epochs' % epoch))
plt.plot(loss_epochs)
plt.subplot(122)
plt.title('Loss Trend in the latest Epoch')
plt.plot(loss_this_epoch)
plt.savefig((('storage/loss-' + str(self.exp_number)) + '.png')) | def save_figure(self, epoch, loss_epochs, loss_this_epoch):
'\n \n '
plt.clf()
plt.close('all')
plt.figure(figsize=(12, 5))
plt.subplot(121)
plt.title(('Loss Trend with %s Epochs' % epoch))
plt.plot(loss_epochs)
plt.subplot(122)
plt.title('Loss Trend in the latest Epoch')
plt.plot(loss_this_epoch)
plt.savefig((('storage/loss-' + str(self.exp_number)) + '.png'))<|docstring|>Save the loss figures<|endoftext|> |
01acfe0be08a20e62ce8d68d65888cd7e7ca155508eab43354b2e2fce9bd4a74 | def model_validation(self, env, horizon=40, n_sample=200, mpc=[]):
'\n Validate the model in the environment\n\n :param env: OpenAI gym style environment\n :param horizon: The prediction horizon\n :param n_sample:\n :param mpc: whether to use the mpc to generate action\n :return: the errors along the horizon\n '
n_state = env.observation_space.shape[0]
errors = np.zeros([n_sample, horizon, n_state])
for i in range(n_sample):
state = env.reset()
state_pred = state.copy()
state_real = state.copy()
for j in range(horizon):
if (mpc != []):
action = mpc.act(state_pred, self)
action = np.array([action])
else:
action = env.action_space.sample()
input_data = np.concatenate((state_pred, action))
state_dt = self.predict(input_data)
state_pred = (state_pred + state_dt[0])
(state_real, reward, done, info) = env.step(action)
error_tmp = (state_real - state_pred)
errors[(i, j)] = abs(error_tmp)
errors_mean = np.mean(errors, axis=0)
errors_max = np.max(errors, axis=0)
errors_min = np.min(errors, axis=0)
errors_std = np.std(errors, axis=0)
return (errors_mean, errors_max, errors_min, errors_std) | Validate the model in the environment
:param env: OpenAI gym style environment
:param horizon: The prediction horizon
:param n_sample:
:param mpc: whether to use the mpc to generate action
:return: the errors along the horizon | MPC/MPC-Qube/dynamics.py | model_validation | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def model_validation(self, env, horizon=40, n_sample=200, mpc=[]):
'\n Validate the model in the environment\n\n :param env: OpenAI gym style environment\n :param horizon: The prediction horizon\n :param n_sample:\n :param mpc: whether to use the mpc to generate action\n :return: the errors along the horizon\n '
n_state = env.observation_space.shape[0]
errors = np.zeros([n_sample, horizon, n_state])
for i in range(n_sample):
state = env.reset()
state_pred = state.copy()
state_real = state.copy()
for j in range(horizon):
if (mpc != []):
action = mpc.act(state_pred, self)
action = np.array([action])
else:
action = env.action_space.sample()
input_data = np.concatenate((state_pred, action))
state_dt = self.predict(input_data)
state_pred = (state_pred + state_dt[0])
(state_real, reward, done, info) = env.step(action)
error_tmp = (state_real - state_pred)
errors[(i, j)] = abs(error_tmp)
errors_mean = np.mean(errors, axis=0)
errors_max = np.max(errors, axis=0)
errors_min = np.min(errors, axis=0)
errors_std = np.std(errors, axis=0)
return (errors_mean, errors_max, errors_min, errors_std) | def model_validation(self, env, horizon=40, n_sample=200, mpc=[]):
'\n Validate the model in the environment\n\n :param env: OpenAI gym style environment\n :param horizon: The prediction horizon\n :param n_sample:\n :param mpc: whether to use the mpc to generate action\n :return: the errors along the horizon\n '
n_state = env.observation_space.shape[0]
errors = np.zeros([n_sample, horizon, n_state])
for i in range(n_sample):
state = env.reset()
state_pred = state.copy()
state_real = state.copy()
for j in range(horizon):
if (mpc != []):
action = mpc.act(state_pred, self)
action = np.array([action])
else:
action = env.action_space.sample()
input_data = np.concatenate((state_pred, action))
state_dt = self.predict(input_data)
state_pred = (state_pred + state_dt[0])
(state_real, reward, done, info) = env.step(action)
error_tmp = (state_real - state_pred)
errors[(i, j)] = abs(error_tmp)
errors_mean = np.mean(errors, axis=0)
errors_max = np.max(errors, axis=0)
errors_min = np.min(errors, axis=0)
errors_std = np.std(errors, axis=0)
return (errors_mean, errors_max, errors_min, errors_std)<|docstring|>Validate the model in the environment
:param env: OpenAI gym style environment
:param horizon: The prediction horizon
:param n_sample:
:param mpc: whether to use the mpc to generate action
:return: the errors along the horizon<|endoftext|> |
971bc11b3b43712790fc7df1a2fd58e249c8b8c34108b1caaf77b145ae0b6094 | def plot_model_validation(self, env, horizon=40, n_sample=200, mpc=[], mode='mean'):
' Plot the model validation in the simulation environment'
if (mode == 'mean'):
errors = self.model_validation(env, horizon, n_sample, mpc)[0]
elif (mode == 'max'):
errors = self.model_validation(env, horizon, n_sample, mpc)[1]
elif (mode == 'min'):
errors = self.model_validation(env, horizon, n_sample, mpc)[2]
elif (mode == 'std'):
errors = self.model_validation(env, horizon, n_sample, mpc)[3]
else:
return 0
plt.close('all')
plt.ioff()
plt.figure(figsize=[12, 6])
plt.title((mode + ' state error between the predictive model and real world along different horizons'))
plt.xlabel('horizon')
plt.ylabel('error')
for i in range(errors.shape[1]):
plt.plot(errors[:, i], label=('state ' + str(i)))
plt.legend()
plt.savefig((('storage/model_error_exp_' + str(self.exp_number)) + '.png'))
plt.show() | Plot the model validation in the simulation environment | MPC/MPC-Qube/dynamics.py | plot_model_validation | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def plot_model_validation(self, env, horizon=40, n_sample=200, mpc=[], mode='mean'):
' '
if (mode == 'mean'):
errors = self.model_validation(env, horizon, n_sample, mpc)[0]
elif (mode == 'max'):
errors = self.model_validation(env, horizon, n_sample, mpc)[1]
elif (mode == 'min'):
errors = self.model_validation(env, horizon, n_sample, mpc)[2]
elif (mode == 'std'):
errors = self.model_validation(env, horizon, n_sample, mpc)[3]
else:
return 0
plt.close('all')
plt.ioff()
plt.figure(figsize=[12, 6])
plt.title((mode + ' state error between the predictive model and real world along different horizons'))
plt.xlabel('horizon')
plt.ylabel('error')
for i in range(errors.shape[1]):
plt.plot(errors[:, i], label=('state ' + str(i)))
plt.legend()
plt.savefig((('storage/model_error_exp_' + str(self.exp_number)) + '.png'))
plt.show() | def plot_model_validation(self, env, horizon=40, n_sample=200, mpc=[], mode='mean'):
' '
if (mode == 'mean'):
errors = self.model_validation(env, horizon, n_sample, mpc)[0]
elif (mode == 'max'):
errors = self.model_validation(env, horizon, n_sample, mpc)[1]
elif (mode == 'min'):
errors = self.model_validation(env, horizon, n_sample, mpc)[2]
elif (mode == 'std'):
errors = self.model_validation(env, horizon, n_sample, mpc)[3]
else:
return 0
plt.close('all')
plt.ioff()
plt.figure(figsize=[12, 6])
plt.title((mode + ' state error between the predictive model and real world along different horizons'))
plt.xlabel('horizon')
plt.ylabel('error')
for i in range(errors.shape[1]):
plt.plot(errors[:, i], label=('state ' + str(i)))
plt.legend()
plt.savefig((('storage/model_error_exp_' + str(self.exp_number)) + '.png'))
plt.show()<|docstring|>Plot the model validation in the simulation environment<|endoftext|> |
1945b7ba23e943c81513b333a5d40a6899e3e8f18ac4cb239e2cf42d27d748ec | def collect_random_dataset(self):
'\n Collect n_random_episodes data (numpy array) with maximum n_max_steps steps per episode\n '
datasets = []
labels = []
for i in range(self.n_random_episodes):
data_tmp = []
label_tmp = []
state_old = self.env.reset()
for j in range(self.n_max_steps):
action = self.env.action_space.sample()
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
data_and_label = np.concatenate((datasets, labels), axis=1)
np.random.shuffle(data_and_label)
print('Collect random dataset shape: ', datasets.shape)
testset_len = int((datasets.shape[0] * self.testset_split))
data_len = datasets.shape[1]
self.random_testset = {'data': data_and_label[:testset_len, :data_len], 'label': data_and_label[:testset_len, data_len:]}
self.random_trainset = {'data': data_and_label[testset_len:, :data_len], 'label': data_and_label[testset_len:, data_len:]}
self.random_dataset = {'data': datasets, 'label': labels}
self.all_dataset = self.random_dataset | Collect n_random_episodes data (numpy array) with maximum n_max_steps steps per episode | MPC/MPC-Qube/dynamics.py | collect_random_dataset | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def collect_random_dataset(self):
'\n \n '
datasets = []
labels = []
for i in range(self.n_random_episodes):
data_tmp = []
label_tmp = []
state_old = self.env.reset()
for j in range(self.n_max_steps):
action = self.env.action_space.sample()
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
data_and_label = np.concatenate((datasets, labels), axis=1)
np.random.shuffle(data_and_label)
print('Collect random dataset shape: ', datasets.shape)
testset_len = int((datasets.shape[0] * self.testset_split))
data_len = datasets.shape[1]
self.random_testset = {'data': data_and_label[:testset_len, :data_len], 'label': data_and_label[:testset_len, data_len:]}
self.random_trainset = {'data': data_and_label[testset_len:, :data_len], 'label': data_and_label[testset_len:, data_len:]}
self.random_dataset = {'data': datasets, 'label': labels}
self.all_dataset = self.random_dataset | def collect_random_dataset(self):
'\n \n '
datasets = []
labels = []
for i in range(self.n_random_episodes):
data_tmp = []
label_tmp = []
state_old = self.env.reset()
for j in range(self.n_max_steps):
action = self.env.action_space.sample()
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
data_and_label = np.concatenate((datasets, labels), axis=1)
np.random.shuffle(data_and_label)
print('Collect random dataset shape: ', datasets.shape)
testset_len = int((datasets.shape[0] * self.testset_split))
data_len = datasets.shape[1]
self.random_testset = {'data': data_and_label[:testset_len, :data_len], 'label': data_and_label[:testset_len, data_len:]}
self.random_trainset = {'data': data_and_label[testset_len:, :data_len], 'label': data_and_label[testset_len:, data_len:]}
self.random_dataset = {'data': datasets, 'label': labels}
self.all_dataset = self.random_dataset<|docstring|>Collect n_random_episodes data (numpy array) with maximum n_max_steps steps per episode<|endoftext|> |
60b9c8c49bf637a821e284427394474ccc8d31b3873da0be169c31c5000fcd33 | def collect_mpc_dataset(self, mpc, dynamic_model, render=False):
'\n Collect reinforced dataset by model predictive control\n\n :param mpc: MPC controller\n :param dynamic_model: System dynamic model\n :param render: Whether render the environment\n :return: list of reward of each episodes\n '
datasets = []
labels = []
reward_episodes = []
for i in range(self.n_mpc_episodes):
data_tmp = []
label_tmp = []
reward_episode = 0
state_old = self.env.reset()
for j in range(self.n_max_steps):
if render:
self.env.render()
action = mpc.act(state_old, dynamic_model)
action = np.array([action])
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
reward_episode += reward
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
reward_episodes.append(reward_episode)
print(f'Episode [{i}/{self.n_mpc_episodes}], Reward: {reward_episode:.8f}, Step: [{j}/{self.n_max_steps}]')
self.mpc_dataset = {'data': datasets, 'label': labels}
self.mpc_dataset_len = datasets.shape[0]
print(('Totally collect %s data based on MPC' % self.mpc_dataset_len))
all_datasets = np.concatenate((datasets, self.all_dataset['data']))
all_labels = np.concatenate((labels, self.all_dataset['label']))
self.all_dataset = {'data': all_datasets, 'label': all_labels}
if self.save_flag:
self.save_datasets(self.all_dataset)
return reward_episodes | Collect reinforced dataset by model predictive control
:param mpc: MPC controller
:param dynamic_model: System dynamic model
:param render: Whether render the environment
:return: list of reward of each episodes | MPC/MPC-Qube/dynamics.py | collect_mpc_dataset | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def collect_mpc_dataset(self, mpc, dynamic_model, render=False):
'\n Collect reinforced dataset by model predictive control\n\n :param mpc: MPC controller\n :param dynamic_model: System dynamic model\n :param render: Whether render the environment\n :return: list of reward of each episodes\n '
datasets = []
labels = []
reward_episodes = []
for i in range(self.n_mpc_episodes):
data_tmp = []
label_tmp = []
reward_episode = 0
state_old = self.env.reset()
for j in range(self.n_max_steps):
if render:
self.env.render()
action = mpc.act(state_old, dynamic_model)
action = np.array([action])
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
reward_episode += reward
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
reward_episodes.append(reward_episode)
print(f'Episode [{i}/{self.n_mpc_episodes}], Reward: {reward_episode:.8f}, Step: [{j}/{self.n_max_steps}]')
self.mpc_dataset = {'data': datasets, 'label': labels}
self.mpc_dataset_len = datasets.shape[0]
print(('Totally collect %s data based on MPC' % self.mpc_dataset_len))
all_datasets = np.concatenate((datasets, self.all_dataset['data']))
all_labels = np.concatenate((labels, self.all_dataset['label']))
self.all_dataset = {'data': all_datasets, 'label': all_labels}
if self.save_flag:
self.save_datasets(self.all_dataset)
return reward_episodes | def collect_mpc_dataset(self, mpc, dynamic_model, render=False):
'\n Collect reinforced dataset by model predictive control\n\n :param mpc: MPC controller\n :param dynamic_model: System dynamic model\n :param render: Whether render the environment\n :return: list of reward of each episodes\n '
datasets = []
labels = []
reward_episodes = []
for i in range(self.n_mpc_episodes):
data_tmp = []
label_tmp = []
reward_episode = 0
state_old = self.env.reset()
for j in range(self.n_max_steps):
if render:
self.env.render()
action = mpc.act(state_old, dynamic_model)
action = np.array([action])
data_tmp.append(np.concatenate((state_old, action)))
(state_new, reward, done, info) = self.env.step(action)
reward_episode += reward
label_tmp.append((state_new - state_old))
if done:
break
state_old = state_new
data_tmp = np.array(data_tmp)
label_tmp = np.array(label_tmp)
if (datasets == []):
datasets = data_tmp
else:
datasets = np.concatenate((datasets, data_tmp))
if (labels == []):
labels = label_tmp
else:
labels = np.concatenate((labels, label_tmp))
reward_episodes.append(reward_episode)
print(f'Episode [{i}/{self.n_mpc_episodes}], Reward: {reward_episode:.8f}, Step: [{j}/{self.n_max_steps}]')
self.mpc_dataset = {'data': datasets, 'label': labels}
self.mpc_dataset_len = datasets.shape[0]
print(('Totally collect %s data based on MPC' % self.mpc_dataset_len))
all_datasets = np.concatenate((datasets, self.all_dataset['data']))
all_labels = np.concatenate((labels, self.all_dataset['label']))
self.all_dataset = {'data': all_datasets, 'label': all_labels}
if self.save_flag:
self.save_datasets(self.all_dataset)
return reward_episodes<|docstring|>Collect reinforced dataset by model predictive control
:param mpc: MPC controller
:param dynamic_model: System dynamic model
:param render: Whether render the environment
:return: list of reward of each episodes<|endoftext|> |
a0c2e17bea424858c18654a8853cdea9635d47cc49343f2355d6a9ae0cfa2c0e | def make_dataset(self):
'\n Sample the training dataset from MPC-based data and previous data\n :return: (numpy array) trainingset and testset\n '
all_length = max(int((self.mpc_dataset_len / self.mpc_dataset_split)), self.min_train_samples)
sample_length = (all_length - self.mpc_dataset_len)
sample_length = min(self.all_dataset['data'].shape[0], sample_length)
print(('Sample %s training data from all previous dataset, total training sample: %s' % (sample_length, all_length)))
data_and_label = np.concatenate((self.all_dataset['data'], self.all_dataset['label']), axis=1)
np.random.shuffle(data_and_label)
testset_len = min(int((all_length * self.testset_split)), self.all_dataset['data'].shape[0])
data_len = self.mpc_dataset['data'].shape[1]
trainset_data = np.concatenate((self.mpc_dataset['data'], data_and_label[:sample_length, :data_len]))
trainset_label = np.concatenate((self.mpc_dataset['label'], data_and_label[:sample_length, data_len:]))
testset_data = data_and_label[testset_len:, :data_len]
testset_label = data_and_label[testset_len:, data_len:]
trainset = {'data': trainset_data, 'label': trainset_label}
testset = {'data': testset_data, 'label': testset_label}
return (trainset, testset) | Sample the training dataset from MPC-based data and previous data
:return: (numpy array) trainingset and testset | MPC/MPC-Qube/dynamics.py | make_dataset | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def make_dataset(self):
'\n Sample the training dataset from MPC-based data and previous data\n :return: (numpy array) trainingset and testset\n '
all_length = max(int((self.mpc_dataset_len / self.mpc_dataset_split)), self.min_train_samples)
sample_length = (all_length - self.mpc_dataset_len)
sample_length = min(self.all_dataset['data'].shape[0], sample_length)
print(('Sample %s training data from all previous dataset, total training sample: %s' % (sample_length, all_length)))
data_and_label = np.concatenate((self.all_dataset['data'], self.all_dataset['label']), axis=1)
np.random.shuffle(data_and_label)
testset_len = min(int((all_length * self.testset_split)), self.all_dataset['data'].shape[0])
data_len = self.mpc_dataset['data'].shape[1]
trainset_data = np.concatenate((self.mpc_dataset['data'], data_and_label[:sample_length, :data_len]))
trainset_label = np.concatenate((self.mpc_dataset['label'], data_and_label[:sample_length, data_len:]))
testset_data = data_and_label[testset_len:, :data_len]
testset_label = data_and_label[testset_len:, data_len:]
trainset = {'data': trainset_data, 'label': trainset_label}
testset = {'data': testset_data, 'label': testset_label}
return (trainset, testset) | def make_dataset(self):
'\n Sample the training dataset from MPC-based data and previous data\n :return: (numpy array) trainingset and testset\n '
all_length = max(int((self.mpc_dataset_len / self.mpc_dataset_split)), self.min_train_samples)
sample_length = (all_length - self.mpc_dataset_len)
sample_length = min(self.all_dataset['data'].shape[0], sample_length)
print(('Sample %s training data from all previous dataset, total training sample: %s' % (sample_length, all_length)))
data_and_label = np.concatenate((self.all_dataset['data'], self.all_dataset['label']), axis=1)
np.random.shuffle(data_and_label)
testset_len = min(int((all_length * self.testset_split)), self.all_dataset['data'].shape[0])
data_len = self.mpc_dataset['data'].shape[1]
trainset_data = np.concatenate((self.mpc_dataset['data'], data_and_label[:sample_length, :data_len]))
trainset_label = np.concatenate((self.mpc_dataset['label'], data_and_label[:sample_length, data_len:]))
testset_data = data_and_label[testset_len:, :data_len]
testset_label = data_and_label[testset_len:, data_len:]
trainset = {'data': trainset_data, 'label': trainset_label}
testset = {'data': testset_data, 'label': testset_label}
return (trainset, testset)<|docstring|>Sample the training dataset from MPC-based data and previous data
:return: (numpy array) trainingset and testset<|endoftext|> |
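The make_dataset body above shuffles a concatenated data/label matrix and slices it back apart; the following standalone sketch reproduces that shuffle-and-split pattern with invented array shapes and split sizes (none of these numbers come from the repository):
import numpy as np

data = np.arange(40, dtype=float).reshape(8, 5)    # 8 samples, 5 features (invented)
labels = np.arange(24, dtype=float).reshape(8, 3)  # 8 samples, 3 label dimensions (invented)

data_and_label = np.concatenate((data, labels), axis=1)  # shape (8, 8)
np.random.shuffle(data_and_label)                        # shuffle rows in place

data_len = data.shape[1]
sample_length = 4   # rows drawn from the pooled dataset for training
testset_len = 6     # rows from this index onward form the held-out split here

trainset_data = data_and_label[:sample_length, :data_len]
trainset_label = data_and_label[:sample_length, data_len:]
testset_data = data_and_label[testset_len:, :data_len]
testset_label = data_and_label[testset_len:, data_len:]
print(trainset_data.shape, trainset_label.shape, testset_data.shape, testset_label.shape)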
cdf2c7029a76193d140bc744c8598b42e240011e764eb7e0f8f8594fc7926c8a | def save_datasets(self, data):
'Save the collected dataset (dictionary)'
print(('Saving all datas to %s' % self.save_path))
with open(self.save_path, 'wb') as f:
pickle.dump(data, f, (- 1)) | Save the collected dataset (dictionary) | MPC/MPC-Qube/dynamics.py | save_datasets | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def save_datasets(self, data):
print(('Saving all datas to %s' % self.save_path))
with open(self.save_path, 'wb') as f:
pickle.dump(data, f, (- 1)) | def save_datasets(self, data):
print(('Saving all datas to %s' % self.save_path))
with open(self.save_path, 'wb') as f:
pickle.dump(data, f, (- 1))<|docstring|>Save the collected dataset (dictionary)<|endoftext|> |
75d1823dabfd3826a668ba8c29cac4c1c0fa3a9da71ab4fe04e8428ad787fc07 | def load_dataset(self):
'Load the dataset (dictionary)'
print(('Load datas from %s' % self.load_path))
with open(self.load_path, 'rb') as f:
dataset = pickle.load(f)
return dataset | Load the dataset (dictionary) | MPC/MPC-Qube/dynamics.py | load_dataset | lambert-x/Reinforcement-Learning-DQN-MPC | 5 | python | def load_dataset(self):
print(('Load datas from %s' % self.load_path))
with open(self.load_path, 'rb') as f:
dataset = pickle.load(f)
return dataset | def load_dataset(self):
print(('Load datas from %s' % self.load_path))
with open(self.load_path, 'rb') as f:
dataset = pickle.load(f)
return dataset<|docstring|>Load the dataset (dictionary)<|endoftext|> |
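save_datasets and load_dataset above are a plain pickle round trip; a minimal sketch using a temporary file as a stand-in for the class's save_path/load_path attributes:
import os
import pickle
import tempfile

import numpy as np

dataset = {'data': np.zeros((2, 4)), 'label': np.ones((2, 1))}
path = os.path.join(tempfile.gettempdir(), 'all_dataset.pkl')  # stand-in for self.save_path

with open(path, 'wb') as f:
    pickle.dump(dataset, f, -1)   # -1 selects the highest available pickle protocol

with open(path, 'rb') as f:
    restored = pickle.load(f)
print(restored['data'].shape, restored['label'].shape)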
6fcac8c37af22faa69a3271da4a20e4210e8f6181db7bfd76003a8c870a09244 | def check_glob(path, group, disable_glob):
'Find files with glob.'
if (isinstance(path, str) and (not disable_glob)):
path_glob = glob(path)
if path_glob:
path = sorted(path_glob)
msg = '\n'.join((f'{i}: {os.path.normpath(fpath)}' for (i, fpath) in enumerate(path, 1)))
len_p = len(path)
_log.info("glob found %d files for '%s':\n%s", len_p, group, msg)
return path | Find files with glob. | arviz/data/io_cmdstan.py | check_glob | peterroelants/arviz | 1,159 | python | def check_glob(path, group, disable_glob):
if (isinstance(path, str) and (not disable_glob)):
path_glob = glob(path)
if path_glob:
path = sorted(path_glob)
msg = '\n'.join((f'{i}: {os.path.normpath(fpath)}' for (i, fpath) in enumerate(path, 1)))
len_p = len(path)
_log.info("glob found %d files for '%s':\n%s", len_p, group, msg)
return path | def check_glob(path, group, disable_glob):
if (isinstance(path, str) and (not disable_glob)):
path_glob = glob(path)
if path_glob:
path = sorted(path_glob)
msg = '\n'.join((f'{i}: {os.path.normpath(fpath)}' for (i, fpath) in enumerate(path, 1)))
len_p = len(path)
_log.info("glob found %d files for '%s':\n%s", len_p, group, msg)
return path<|docstring|>Find files with glob.<|endoftext|> |
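check_glob only expands a string pattern when globbing is enabled; a small demonstration with invented file names shows the sorted expansion it logs:
import os
import tempfile
from glob import glob

tmpdir = tempfile.mkdtemp()
for chain in (1, 2):
    open(os.path.join(tmpdir, f'output_{chain}.csv'), 'w').close()  # invented chain files

pattern = os.path.join(tmpdir, 'output_*.csv')
paths = sorted(glob(pattern))   # what check_glob does when disable_glob is False
print(paths)                    # both chain files, in sorted order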
fe73e234b99002cf2cf6cd1fd67779dffe4f5a69617e039d6878f9380d974c34 | def _process_configuration(comments):
'Extract sampling information.'
results = {'comments': '\n'.join(comments), 'stan_version': {}}
comments_gen = iter(comments)
for comment in comments_gen:
comment = re.sub('^\\s*#\\s*|\\s*\\(Default\\)\\s*$', '', comment).strip()
if comment.startswith('stan_version_'):
(key, val) = re.sub('^\\s*stan_version_', '', comment).split('=')
results['stan_version'][key.strip()] = val.strip()
elif comment.startswith('Step size'):
(_, val) = comment.split('=')
results['step_size'] = float(val.strip())
elif ('inverse mass matrix' in comment):
comment = re.sub('^\\s*#\\s*', '', next(comments_gen)).strip()
results['inverse_mass_matrix'] = [float(item) for item in comment.split(',')]
elif (('seconds' in comment) and any(((item in comment) for item in ('(Warm-up)', '(Sampling)', '(Total)')))):
value = re.sub('^Elapsed\\s*Time:\\s*|\\s*seconds\\s*\\(Warm-up\\)\\s*|\\s*seconds\\s*\\(Sampling\\)\\s*|\\s*seconds\\s*\\(Total\\)\\s*', '', comment)
key = ('warmup_time_seconds' if ('(Warm-up)' in comment) else ('sampling_time_seconds' if ('(Sampling)' in comment) else 'total_time_seconds'))
results[key] = float(value)
elif ('=' in comment):
match_int = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+)$', comment)
match_float = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+\\.[0-9]+)$', comment)
match_str = re.search('^(\\S+)\\s*=\\s*(\\S+)$', comment)
match_empty = re.search('^(\\S+)\\s*=\\s*$', comment)
if match_int:
(key, value) = (match_int.group(1), match_int.group(2))
results[key] = int(value)
elif match_float:
(key, value) = (match_float.group(1), match_float.group(2))
results[key] = float(value)
elif match_str:
(key, value) = (match_str.group(1), match_str.group(2))
results[key] = value
elif match_empty:
key = match_empty.group(1)
results[key] = None
results = {key: str(results[key]) for key in sorted(results)}
return results | Extract sampling information. | arviz/data/io_cmdstan.py | _process_configuration | peterroelants/arviz | 1,159 | python | def _process_configuration(comments):
results = {'comments': '\n'.join(comments), 'stan_version': {}}
comments_gen = iter(comments)
for comment in comments_gen:
comment = re.sub('^\\s*#\\s*|\\s*\\(Default\\)\\s*$', '', comment).strip()
if comment.startswith('stan_version_'):
(key, val) = re.sub('^\\s*stan_version_', '', comment).split('=')
results['stan_version'][key.strip()] = val.strip()
elif comment.startswith('Step size'):
(_, val) = comment.split('=')
results['step_size'] = float(val.strip())
elif ('inverse mass matrix' in comment):
comment = re.sub('^\\s*#\\s*', '', next(comments_gen)).strip()
results['inverse_mass_matrix'] = [float(item) for item in comment.split(',')]
elif (('seconds' in comment) and any(((item in comment) for item in ('(Warm-up)', '(Sampling)', '(Total)')))):
value = re.sub('^Elapsed\\s*Time:\\s*|\\s*seconds\\s*\\(Warm-up\\)\\s*|\\s*seconds\\s*\\(Sampling\\)\\s*|\\s*seconds\\s*\\(Total\\)\\s*', '', comment)
key = ('warmup_time_seconds' if ('(Warm-up)' in comment) else ('sampling_time_seconds' if ('(Sampling)' in comment) else 'total_time_seconds'))
results[key] = float(value)
elif ('=' in comment):
match_int = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+)$', comment)
match_float = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+\\.[0-9]+)$', comment)
match_str = re.search('^(\\S+)\\s*=\\s*(\\S+)$', comment)
match_empty = re.search('^(\\S+)\\s*=\\s*$', comment)
if match_int:
(key, value) = (match_int.group(1), match_int.group(2))
results[key] = int(value)
elif match_float:
(key, value) = (match_float.group(1), match_float.group(2))
results[key] = float(value)
elif match_str:
(key, value) = (match_str.group(1), match_str.group(2))
results[key] = value
elif match_empty:
key = match_empty.group(1)
results[key] = None
results = {key: str(results[key]) for key in sorted(results)}
return results | def _process_configuration(comments):
results = {'comments': '\n'.join(comments), 'stan_version': {}}
comments_gen = iter(comments)
for comment in comments_gen:
comment = re.sub('^\\s*#\\s*|\\s*\\(Default\\)\\s*$', '', comment).strip()
if comment.startswith('stan_version_'):
(key, val) = re.sub('^\\s*stan_version_', '', comment).split('=')
results['stan_version'][key.strip()] = val.strip()
elif comment.startswith('Step size'):
(_, val) = comment.split('=')
results['step_size'] = float(val.strip())
elif ('inverse mass matrix' in comment):
comment = re.sub('^\\s*#\\s*', '', next(comments_gen)).strip()
results['inverse_mass_matrix'] = [float(item) for item in comment.split(',')]
elif (('seconds' in comment) and any(((item in comment) for item in ('(Warm-up)', '(Sampling)', '(Total)')))):
value = re.sub('^Elapsed\\s*Time:\\s*|\\s*seconds\\s*\\(Warm-up\\)\\s*|\\s*seconds\\s*\\(Sampling\\)\\s*|\\s*seconds\\s*\\(Total\\)\\s*', '', comment)
key = ('warmup_time_seconds' if ('(Warm-up)' in comment) else ('sampling_time_seconds' if ('(Sampling)' in comment) else 'total_time_seconds'))
results[key] = float(value)
elif ('=' in comment):
match_int = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+)$', comment)
match_float = re.search('^(\\S+)\\s*=\\s*([-+]?[0-9]+\\.[0-9]+)$', comment)
match_str = re.search('^(\\S+)\\s*=\\s*(\\S+)$', comment)
match_empty = re.search('^(\\S+)\\s*=\\s*$', comment)
if match_int:
(key, value) = (match_int.group(1), match_int.group(2))
results[key] = int(value)
elif match_float:
(key, value) = (match_float.group(1), match_float.group(2))
results[key] = float(value)
elif match_str:
(key, value) = (match_str.group(1), match_str.group(2))
results[key] = value
elif match_empty:
key = match_empty.group(1)
results[key] = None
results = {key: str(results[key]) for key in sorted(results)}
return results<|docstring|>Extract sampling information.<|endoftext|> |
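_process_configuration walks CmdStan's commented header/footer lines, handling stan_version_* entries, the step size, the inverse mass matrix, the elapsed-time lines, and generic key=value pairs; a toy illustration with invented comment strings (the regex shown is the integer key=value branch used above):
import re

comments = [
    '# stan_version_major = 2',
    '# num_samples = 1000 (Default)',
    '# Step size = 0.0625',
    '# Diagonal elements of inverse mass matrix:',
    '# 1.1, 0.9',
    '#  Elapsed Time: 0.5 seconds (Warm-up)',
]
# The generic key=value branch relies on regexes like the integer one used above:
print(re.search(r'^(\S+)\s*=\s*([-+]?[0-9]+)$', 'num_samples = 1000').groups())
# -> ('num_samples', '1000'); the step size, the mass-matrix line that follows its
# header, and the "Elapsed Time ... seconds" lines are picked up by the dedicated
# branches, so the parsed result would contain entries such as 'step_size' and
# 'warmup_time_seconds' (all values stringified at the end).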
ee0d801c1a30c488cab17af62ffa284d059b5aad9c1920b603c1db7b2428cbde | def _read_output_file(path):
'Read Stan csv file to ndarray.'
comments = []
data = []
columns = None
with open(path, 'rb') as f_obj:
for line in f_obj:
if line.startswith(b'#'):
comments.append(line.strip().decode('utf-8'))
continue
columns = {key: idx for (idx, key) in enumerate(line.strip().decode('utf-8').split(','))}
break
for line in f_obj:
line = line.strip()
if line.startswith(b'#'):
comments.append(line.decode('utf-8'))
continue
if line:
data.append(np.array(line.split(b','), dtype=np.float64))
return (columns, np.array(data, dtype=np.float64), comments) | Read Stan csv file to ndarray. | arviz/data/io_cmdstan.py | _read_output_file | peterroelants/arviz | 1,159 | python | def _read_output_file(path):
comments = []
data = []
columns = None
with open(path, 'rb') as f_obj:
for line in f_obj:
if line.startswith(b'#'):
comments.append(line.strip().decode('utf-8'))
continue
columns = {key: idx for (idx, key) in enumerate(line.strip().decode('utf-8').split(','))}
break
for line in f_obj:
line = line.strip()
if line.startswith(b'#'):
comments.append(line.decode('utf-8'))
continue
if line:
data.append(np.array(line.split(b','), dtype=np.float64))
return (columns, np.array(data, dtype=np.float64), comments) | def _read_output_file(path):
comments = []
data = []
columns = None
with open(path, 'rb') as f_obj:
for line in f_obj:
if line.startswith(b'#'):
comments.append(line.strip().decode('utf-8'))
continue
columns = {key: idx for (idx, key) in enumerate(line.strip().decode('utf-8').split(','))}
break
for line in f_obj:
line = line.strip()
if line.startswith(b'#'):
comments.append(line.decode('utf-8'))
continue
if line:
data.append(np.array(line.split(b','), dtype=np.float64))
return (columns, np.array(data, dtype=np.float64), comments)<|docstring|>Read Stan csv file to ndarray.<|endoftext|> |
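_read_output_file treats '#'-prefixed lines as comments, the first remaining line as the CSV header, and every later non-comment line as float data; a synthetic file with invented contents exercises that layout:
import os
import tempfile

import numpy as np

csv_text = (
    '# model = example\n'
    'lp__,accept_stat__,theta\n'
    '-7.3,0.98,0.25\n'
    '-7.1,0.99,0.31\n'
    '# Elapsed Time: 0.1 seconds (Total)\n'
)
path = os.path.join(tempfile.gettempdir(), 'toy_output.csv')  # invented file
with open(path, 'w') as f:
    f.write(csv_text)

with open(path, 'rb') as f:
    lines = f.read().splitlines()
header = next(line for line in lines if not line.startswith(b'#'))
columns = {key: idx for idx, key in enumerate(header.decode('utf-8').split(','))}
data = np.array(
    [line.split(b',') for line in lines if line and not line.startswith(b'#') and line != header],
    dtype=np.float64,
)
print(columns)      # {'lp__': 0, 'accept_stat__': 1, 'theta': 2}
print(data.shape)   # (2, 3): two draws, three columns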
a0b07adcee05e001492a7b3334831dbf759826a783835f730c79b573f791bccb | def _read_output(path):
'Read CmdStan output csv file.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict[str, Any]\n '
(columns, data, comments) = _read_output_file(path)
pconf = _process_configuration(comments)
saved_warmup = ((int(pconf.get('save_warmup', 0)) * int(pconf.get('num_warmup', 0))) // int(pconf.get('thin', 1)))
data_warmup = data[:saved_warmup]
data = data[saved_warmup:]
sample_stats_columns = {col: idx for (col, idx) in columns.items() if col.endswith('__')}
sample_columns = {col: idx for (col, idx) in columns.items() if (col not in sample_stats_columns)}
return {'sample': data, 'sample_warmup': data_warmup, 'sample_columns': sample_columns, 'sample_stats_columns': sample_stats_columns, 'configuration_info': pconf} | Read CmdStan output csv file.
Parameters
----------
path : str
Returns
-------
Dict[str, Any] | arviz/data/io_cmdstan.py | _read_output | peterroelants/arviz | 1,159 | python | def _read_output(path):
'Read CmdStan output csv file.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict[str, Any]\n '
(columns, data, comments) = _read_output_file(path)
pconf = _process_configuration(comments)
saved_warmup = ((int(pconf.get('save_warmup', 0)) * int(pconf.get('num_warmup', 0))) // int(pconf.get('thin', 1)))
data_warmup = data[:saved_warmup]
data = data[saved_warmup:]
sample_stats_columns = {col: idx for (col, idx) in columns.items() if col.endswith('__')}
sample_columns = {col: idx for (col, idx) in columns.items() if (col not in sample_stats_columns)}
return {'sample': data, 'sample_warmup': data_warmup, 'sample_columns': sample_columns, 'sample_stats_columns': sample_stats_columns, 'configuration_info': pconf} | def _read_output(path):
'Read CmdStan output csv file.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict[str, Any]\n '
(columns, data, comments) = _read_output_file(path)
pconf = _process_configuration(comments)
saved_warmup = ((int(pconf.get('save_warmup', 0)) * int(pconf.get('num_warmup', 0))) // int(pconf.get('thin', 1)))
data_warmup = data[:saved_warmup]
data = data[saved_warmup:]
sample_stats_columns = {col: idx for (col, idx) in columns.items() if col.endswith('__')}
sample_columns = {col: idx for (col, idx) in columns.items() if (col not in sample_stats_columns)}
return {'sample': data, 'sample_warmup': data_warmup, 'sample_columns': sample_columns, 'sample_stats_columns': sample_stats_columns, 'configuration_info': pconf}<|docstring|>Read CmdStan output csv file.
Parameters
----------
path : str
Returns
-------
Dict[str, Any]<|endoftext|> |
ab2d59585339ec1fc2bf264e8071fbf7bd2ce841a471af77f346fb04256bc480 | def _process_data_var(string):
'Transform datastring to key, values pair.\n\n All values are transformed to floating point values.\n\n Parameters\n ----------\n string : str\n\n Returns\n -------\n Tuple[Str, Str]\n key, values pair\n '
(key, var) = string.split('<-')
if ('structure' in var):
(var, dim) = var.replace('structure(', '').replace(',', '').split('.Dim')
dtype = float
var = var.replace('c(', '').replace(')', '').strip().split()
dim = dim.replace('=', '').replace('c(', '').replace(')', '').strip().split()
dim = tuple(map(int, dim))
var = np.fromiter(map(dtype, var), dtype).reshape(dim, order='F')
elif ('c(' in var):
dtype = float
var = var.replace('c(', '').replace(')', '').split(',')
var = np.fromiter(map(dtype, var), dtype)
else:
dtype = float
var = dtype(var)
return (key.strip(), var) | Transform datastring to key, values pair.
All values are transformed to floating point values.
Parameters
----------
string : str
Returns
-------
Tuple[Str, Str]
key, values pair | arviz/data/io_cmdstan.py | _process_data_var | peterroelants/arviz | 1,159 | python | def _process_data_var(string):
'Transform datastring to key, values pair.\n\n All values are transformed to floating point values.\n\n Parameters\n ----------\n string : str\n\n Returns\n -------\n Tuple[Str, Str]\n key, values pair\n '
(key, var) = string.split('<-')
if ('structure' in var):
(var, dim) = var.replace('structure(', '').replace(',', '').split('.Dim')
dtype = float
var = var.replace('c(', '').replace(')', '').strip().split()
dim = dim.replace('=', '').replace('c(', '').replace(')', '').strip().split()
dim = tuple(map(int, dim))
var = np.fromiter(map(dtype, var), dtype).reshape(dim, order='F')
elif ('c(' in var):
dtype = float
var = var.replace('c(', '').replace(')', '').split(',')
var = np.fromiter(map(dtype, var), dtype)
else:
dtype = float
var = dtype(var)
return (key.strip(), var) | def _process_data_var(string):
'Transform datastring to key, values pair.\n\n All values are transformed to floating point values.\n\n Parameters\n ----------\n string : str\n\n Returns\n -------\n Tuple[Str, Str]\n key, values pair\n '
(key, var) = string.split('<-')
if ('structure' in var):
(var, dim) = var.replace('structure(', '').replace(',', '').split('.Dim')
dtype = float
var = var.replace('c(', '').replace(')', '').strip().split()
dim = dim.replace('=', '').replace('c(', '').replace(')', '').strip().split()
dim = tuple(map(int, dim))
var = np.fromiter(map(dtype, var), dtype).reshape(dim, order='F')
elif ('c(' in var):
dtype = float
var = var.replace('c(', '').replace(')', '').split(',')
var = np.fromiter(map(dtype, var), dtype)
else:
dtype = float
var = dtype(var)
return (key.strip(), var)<|docstring|>Transform datastring to key, values pair.
All values are transformed to floating point values.
Parameters
----------
string : str
Returns
-------
Tuple[Str, Str]
key, values pair<|endoftext|> |
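_process_data_var handles Rdump assignments of three shapes: scalars, c(...) vectors, and structure(..., .Dim=...) arrays; a sketch of all three cases with invented values:
import numpy as np

# Scalar assignment: the right-hand side is converted to float directly.
print(float('N <- 8'.split('<-')[1]))   # 8.0

# Vector: strip the c( ... ) wrapper and parse the comma separated values.
key, var = 'y <- c(1, 2, 3)'.split('<-')
print(key.strip(), np.fromiter(map(float, var.replace('c(', '').replace(')', '').split(',')), float))

# Array: the .Dim attribute carries the Fortran-order shape.
key, var = 'Sigma <- structure(c(1, 0, 0, 1), .Dim = c(2, 2))'.split('<-')
body, dim = var.replace('structure(', '').replace(',', '').split('.Dim')
vals = body.replace('c(', '').replace(')', '').strip().split()
shape = tuple(int(i) for i in dim.replace('=', '').replace('c(', '').replace(')', '').strip().split())
print(key.strip(), np.fromiter(map(float, vals), float).reshape(shape, order='F'))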
bba11a5cb98c61aa511eb70b2f1c0eb63d3577a899054142a2d2048632efbca5 | def _read_data(path):
'Read Rdump or JSON output to dictionary.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict\n key, values pairs from Rdump/JSON formatted data.\n '
data = {}
with open(path, 'r', encoding='utf8') as f_obj:
if path.lower().endswith('.json'):
return json.load(f_obj)
var = ''
for line in f_obj:
if ('<-' in line):
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
var = ''
var += (' ' + line.strip())
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
return data | Read Rdump or JSON output to dictionary.
Parameters
----------
path : str
Returns
-------
Dict
key, values pairs from Rdump/JSON formatted data. | arviz/data/io_cmdstan.py | _read_data | peterroelants/arviz | 1,159 | python | def _read_data(path):
'Read Rdump or JSON output to dictionary.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict\n key, values pairs from Rdump/JSON formatted data.\n '
data = {}
with open(path, 'r', encoding='utf8') as f_obj:
if path.lower().endswith('.json'):
return json.load(f_obj)
var = ''
for line in f_obj:
if ('<-' in line):
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
var = ''
var += (' ' + line.strip())
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
return data | def _read_data(path):
'Read Rdump or JSON output to dictionary.\n\n Parameters\n ----------\n path : str\n\n Returns\n -------\n Dict\n key, values pairs from Rdump/JSON formatted data.\n '
data = {}
with open(path, 'r', encoding='utf8') as f_obj:
if path.lower().endswith('.json'):
return json.load(f_obj)
var = ''
for line in f_obj:
if ('<-' in line):
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
var = ''
var += (' ' + line.strip())
if len(var):
(key, var) = _process_data_var(var)
data[key] = var
return data<|docstring|>Read Rdump or JSON output to dictionary.
Parameters
----------
path : str
Returns
-------
Dict
key, values pairs from Rdump/JSON formatted data.<|endoftext|> |
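For a .json path, _read_data simply defers to json.load; a tiny round trip with an invented file name and contents:
import json
import os
import tempfile

path = os.path.join(tempfile.gettempdir(), 'observed_data.json')  # invented file name
with open(path, 'w', encoding='utf8') as f:
    json.dump({'N': 3, 'y': [2.1, 1.7, 0.3]}, f)

with open(path, 'r', encoding='utf8') as f:
    print(json.load(f))   # {'N': 3, 'y': [2.1, 1.7, 0.3]}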
35e2cf1edcb1a61d4c1ad5bc21246d9bb9134856763870e2ca93d451482b6f7e | def _unpack_ndarrays(arrays, columns, dtypes=None):
'Transform a list of ndarrays to dictionary containing ndarrays.\n\n Parameters\n ----------\n arrays : List[np.ndarray]\n columns: Dict[str, int]\n dtypes: Dict[str, Any]\n\n Returns\n -------\n Dict\n key, values pairs. Values are formatted to shape = (nchain, ndraws, *shape)\n '
col_groups = defaultdict(list)
for (col, col_idx) in columns.items():
(key, *loc) = col.split('.')
loc = tuple(((int(i) - 1) for i in loc))
col_groups[key].append((col_idx, loc))
chains = len(arrays)
draws = len(arrays[0])
sample = {}
if draws:
for (key, cols_locs) in col_groups.items():
ndim = (np.array([loc for (_, loc) in cols_locs]).max(0) + 1)
dtype = dtypes.get(key, np.float64)
sample[key] = np.zeros((chains, draws, *ndim), dtype=dtype)
for (col, loc) in cols_locs:
for (chain_id, arr) in enumerate(arrays):
draw = arr[:, col]
if (loc == ()):
sample[key][chain_id, :] = draw
else:
axis1_all = range(sample[key].shape[1])
slicer = (chain_id, axis1_all, *loc)
sample[key][slicer] = draw
return sample | Transform a list of ndarrays to dictionary containing ndarrays.
Parameters
----------
arrays : List[np.ndarray]
columns: Dict[str, int]
dtypes: Dict[str, Any]
Returns
-------
Dict
key, values pairs. Values are formatted to shape = (nchain, ndraws, *shape) | arviz/data/io_cmdstan.py | _unpack_ndarrays | peterroelants/arviz | 1,159 | python | def _unpack_ndarrays(arrays, columns, dtypes=None):
'Transform a list of ndarrays to dictionary containing ndarrays.\n\n Parameters\n ----------\n arrays : List[np.ndarray]\n columns: Dict[str, int]\n dtypes: Dict[str, Any]\n\n Returns\n -------\n Dict\n key, values pairs. Values are formatted to shape = (nchain, ndraws, *shape)\n '
col_groups = defaultdict(list)
for (col, col_idx) in columns.items():
(key, *loc) = col.split('.')
loc = tuple(((int(i) - 1) for i in loc))
col_groups[key].append((col_idx, loc))
chains = len(arrays)
draws = len(arrays[0])
sample = {}
if draws:
for (key, cols_locs) in col_groups.items():
ndim = (np.array([loc for (_, loc) in cols_locs]).max(0) + 1)
dtype = dtypes.get(key, np.float64)
sample[key] = np.zeros((chains, draws, *ndim), dtype=dtype)
for (col, loc) in cols_locs:
for (chain_id, arr) in enumerate(arrays):
draw = arr[:, col]
if (loc == ()):
sample[key][chain_id, :] = draw
else:
axis1_all = range(sample[key].shape[1])
slicer = (chain_id, axis1_all, *loc)
sample[key][slicer] = draw
return sample | def _unpack_ndarrays(arrays, columns, dtypes=None):
'Transform a list of ndarrays to dictionary containing ndarrays.\n\n Parameters\n ----------\n arrays : List[np.ndarray]\n columns: Dict[str, int]\n dtypes: Dict[str, Any]\n\n Returns\n -------\n Dict\n key, values pairs. Values are formatted to shape = (nchain, ndraws, *shape)\n '
col_groups = defaultdict(list)
for (col, col_idx) in columns.items():
(key, *loc) = col.split('.')
loc = tuple(((int(i) - 1) for i in loc))
col_groups[key].append((col_idx, loc))
chains = len(arrays)
draws = len(arrays[0])
sample = {}
if draws:
for (key, cols_locs) in col_groups.items():
ndim = (np.array([loc for (_, loc) in cols_locs]).max(0) + 1)
dtype = dtypes.get(key, np.float64)
sample[key] = np.zeros((chains, draws, *ndim), dtype=dtype)
for (col, loc) in cols_locs:
for (chain_id, arr) in enumerate(arrays):
draw = arr[:, col]
if (loc == ()):
sample[key][chain_id, :] = draw
else:
axis1_all = range(sample[key].shape[1])
slicer = (chain_id, axis1_all, *loc)
sample[key][slicer] = draw
return sample<|docstring|>Transform a list of ndarrays to dictionary containing ndarrays.
Parameters
----------
arrays : List[np.ndarray]
columns: Dict[str, int]
dtypes: Dict[str, Any]
Returns
-------
Dict
key, values pairs. Values are formatted to shape = (nchain, ndraws, *shape)<|endoftext|> |
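_unpack_ndarrays regroups flattened CmdStan columns such as theta.1, theta.2 into arrays shaped (chain, draw, *shape); a compact sketch of the same regrouping with an invented header and random draws:
from collections import defaultdict

import numpy as np

columns = {'mu': 0, 'theta.1': 1, 'theta.2': 2}            # invented header
arrays = [np.random.rand(5, 3), np.random.rand(5, 3)]      # 2 chains, 5 draws, 3 columns

col_groups = defaultdict(list)
for col, idx in columns.items():
    key, *loc = col.split('.')
    col_groups[key].append((idx, tuple(int(i) - 1 for i in loc)))

sample = {}
for key, cols_locs in col_groups.items():
    extra = tuple(np.array([loc for _, loc in cols_locs]).max(0) + 1) if cols_locs[0][1] else ()
    sample[key] = np.zeros((len(arrays), arrays[0].shape[0], *extra))
    for idx, loc in cols_locs:
        for chain, arr in enumerate(arrays):
            sample[key][(chain, slice(None), *loc)] = arr[:, idx]

print(sample['mu'].shape, sample['theta'].shape)   # (2, 5) and (2, 5, 2)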
2340584654617b779f6537c6615693143394191bc398b59d659e7b21b6807b6e | def from_cmdstan(posterior: Optional[Union[(str, List[str])]]=None, *, posterior_predictive: Optional[Union[(str, List[str])]]=None, predictions: Optional[Union[(str, List[str])]]=None, prior: Optional[Union[(str, List[str])]]=None, prior_predictive: Optional[Union[(str, List[str])]]=None, observed_data: Optional[str]=None, observed_data_var: Optional[Union[(str, List[str])]]=None, constant_data: Optional[str]=None, constant_data_var: Optional[Union[(str, List[str])]]=None, predictions_constant_data: Optional[str]=None, predictions_constant_data_var: Optional[Union[(str, List[str])]]=None, log_likelihood: Optional[Union[(str, List[str])]]=None, index_origin: Optional[int]=None, coords: Optional[CoordSpec]=None, dims: Optional[DimSpec]=None, disable_glob: Optional[bool]=False, save_warmup: Optional[bool]=None, dtypes: Optional[Dict]=None) -> InferenceData:
'Convert CmdStan data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_cmdstan <creating_InferenceData>`\n\n Parameters\n ----------\n posterior : str or list of str, optional\n List of paths to output.csv files.\n posterior_predictive : str or list of str, optional\n Posterior predictive samples for the fit. If endswith ".csv" assumes file.\n predictions : str or list of str, optional\n Out of sample predictions samples for the fit. If endswith ".csv" assumes file.\n prior : str or list of str, optional\n List of paths to output.csv files\n prior_predictive : str or list of str, optional\n Prior predictive samples for the fit. If endswith ".csv" assumes file.\n observed_data : str, optional\n Observed data used in the sampling. Path to data file in Rdump or JSON format.\n observed_data_var : str or list of str, optional\n Variable(s) used for slicing observed_data. If not defined, all\n data variables are imported.\n constant_data : str, optional\n Constant data used in the sampling. Path to data file in Rdump or JSON format.\n constant_data_var : str or list of str, optional\n Variable(s) used for slicing constant_data. If not defined, all\n data variables are imported.\n predictions_constant_data : str, optional\n Constant data for predictions used in the sampling.\n Path to data file in Rdump or JSON format.\n predictions_constant_data_var : str or list of str, optional\n Variable(s) used for slicing predictions_constant_data.\n If not defined, all data variables are imported.\n log_likelihood : str or list of str, optional\n Pointwise log_likelihood for the data.\n index_origin : int, optional\n Starting value of integer coordinate values. Defaults to the value in rcParam\n ``data.index_origin``.\n coords : dict of {str: array_like}, optional\n A dictionary containing the values that are used as index. The key\n is the name of the dimension, the values are the index values.\n dims : dict of {str: list of str}, optional\n A mapping from variables to a list of coordinate names for the variable.\n disable_glob : bool\n Don\'t use glob for string input. This means that all string input is\n assumed to be variable names (samples) or a path (data).\n save_warmup : bool\n Save warmup iterations into InferenceData object, if found in the input files.\n If not defined, use default defined by the rcParams.\n dtypes : dict or str\n A dictionary containing dtype information (int, float) for parameters.\n If input is a string, it is assumed to be a model code or path to model code file.\n\n Returns\n -------\n InferenceData object\n '
return CmdStanConverter(posterior=posterior, posterior_predictive=posterior_predictive, predictions=predictions, prior=prior, prior_predictive=prior_predictive, observed_data=observed_data, observed_data_var=observed_data_var, constant_data=constant_data, constant_data_var=constant_data_var, predictions_constant_data=predictions_constant_data, predictions_constant_data_var=predictions_constant_data_var, log_likelihood=log_likelihood, index_origin=index_origin, coords=coords, dims=dims, disable_glob=disable_glob, save_warmup=save_warmup, dtypes=dtypes).to_inference_data() | Convert CmdStan data into an InferenceData object.
For a usage example read the
:ref:`Creating InferenceData section on from_cmdstan <creating_InferenceData>`
Parameters
----------
posterior : str or list of str, optional
List of paths to output.csv files.
posterior_predictive : str or list of str, optional
Posterior predictive samples for the fit. If endswith ".csv" assumes file.
predictions : str or list of str, optional
Out of sample predictions samples for the fit. If endswith ".csv" assumes file.
prior : str or list of str, optional
List of paths to output.csv files
prior_predictive : str or list of str, optional
Prior predictive samples for the fit. If endswith ".csv" assumes file.
observed_data : str, optional
Observed data used in the sampling. Path to data file in Rdump or JSON format.
observed_data_var : str or list of str, optional
Variable(s) used for slicing observed_data. If not defined, all
data variables are imported.
constant_data : str, optional
Constant data used in the sampling. Path to data file in Rdump or JSON format.
constant_data_var : str or list of str, optional
Variable(s) used for slicing constant_data. If not defined, all
data variables are imported.
predictions_constant_data : str, optional
Constant data for predictions used in the sampling.
Path to data file in Rdump or JSON format.
predictions_constant_data_var : str or list of str, optional
Variable(s) used for slicing predictions_constant_data.
If not defined, all data variables are imported.
log_likelihood : str or list of str, optional
Pointwise log_likelihood for the data.
index_origin : int, optional
Starting value of integer coordinate values. Defaults to the value in rcParam
``data.index_origin``.
coords : dict of {str: array_like}, optional
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict of {str: list of str}, optional
A mapping from variables to a list of coordinate names for the variable.
disable_glob : bool
Don't use glob for string input. This means that all string input is
assumed to be variable names (samples) or a path (data).
save_warmup : bool
Save warmup iterations into InferenceData object, if found in the input files.
If not defined, use default defined by the rcParams.
dtypes : dict or str
A dictionary containing dtype information (int, float) for parameters.
If input is a string, it is assumed to be a model code or path to model code file.
Returns
-------
InferenceData object | arviz/data/io_cmdstan.py | from_cmdstan | peterroelants/arviz | 1,159 | python | def from_cmdstan(posterior: Optional[Union[(str, List[str])]]=None, *, posterior_predictive: Optional[Union[(str, List[str])]]=None, predictions: Optional[Union[(str, List[str])]]=None, prior: Optional[Union[(str, List[str])]]=None, prior_predictive: Optional[Union[(str, List[str])]]=None, observed_data: Optional[str]=None, observed_data_var: Optional[Union[(str, List[str])]]=None, constant_data: Optional[str]=None, constant_data_var: Optional[Union[(str, List[str])]]=None, predictions_constant_data: Optional[str]=None, predictions_constant_data_var: Optional[Union[(str, List[str])]]=None, log_likelihood: Optional[Union[(str, List[str])]]=None, index_origin: Optional[int]=None, coords: Optional[CoordSpec]=None, dims: Optional[DimSpec]=None, disable_glob: Optional[bool]=False, save_warmup: Optional[bool]=None, dtypes: Optional[Dict]=None) -> InferenceData:
'Convert CmdStan data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_cmdstan <creating_InferenceData>`\n\n Parameters\n ----------\n posterior : str or list of str, optional\n List of paths to output.csv files.\n posterior_predictive : str or list of str, optional\n Posterior predictive samples for the fit. If endswith ".csv" assumes file.\n predictions : str or list of str, optional\n Out of sample predictions samples for the fit. If endswith ".csv" assumes file.\n prior : str or list of str, optional\n List of paths to output.csv files\n prior_predictive : str or list of str, optional\n Prior predictive samples for the fit. If endswith ".csv" assumes file.\n observed_data : str, optional\n Observed data used in the sampling. Path to data file in Rdump or JSON format.\n observed_data_var : str or list of str, optional\n Variable(s) used for slicing observed_data. If not defined, all\n data variables are imported.\n constant_data : str, optional\n Constant data used in the sampling. Path to data file in Rdump or JSON format.\n constant_data_var : str or list of str, optional\n Variable(s) used for slicing constant_data. If not defined, all\n data variables are imported.\n predictions_constant_data : str, optional\n Constant data for predictions used in the sampling.\n Path to data file in Rdump or JSON format.\n predictions_constant_data_var : str or list of str, optional\n Variable(s) used for slicing predictions_constant_data.\n If not defined, all data variables are imported.\n log_likelihood : str or list of str, optional\n Pointwise log_likelihood for the data.\n index_origin : int, optional\n Starting value of integer coordinate values. Defaults to the value in rcParam\n ``data.index_origin``.\n coords : dict of {str: array_like}, optional\n A dictionary containing the values that are used as index. The key\n is the name of the dimension, the values are the index values.\n dims : dict of {str: list of str}, optional\n A mapping from variables to a list of coordinate names for the variable.\n disable_glob : bool\n Don\'t use glob for string input. This means that all string input is\n assumed to be variable names (samples) or a path (data).\n save_warmup : bool\n Save warmup iterations into InferenceData object, if found in the input files.\n If not defined, use default defined by the rcParams.\n dtypes : dict or str\n A dictionary containing dtype information (int, float) for parameters.\n If input is a string, it is assumed to be a model code or path to model code file.\n\n Returns\n -------\n InferenceData object\n '
return CmdStanConverter(posterior=posterior, posterior_predictive=posterior_predictive, predictions=predictions, prior=prior, prior_predictive=prior_predictive, observed_data=observed_data, observed_data_var=observed_data_var, constant_data=constant_data, constant_data_var=constant_data_var, predictions_constant_data=predictions_constant_data, predictions_constant_data_var=predictions_constant_data_var, log_likelihood=log_likelihood, index_origin=index_origin, coords=coords, dims=dims, disable_glob=disable_glob, save_warmup=save_warmup, dtypes=dtypes).to_inference_data() | def from_cmdstan(posterior: Optional[Union[(str, List[str])]]=None, *, posterior_predictive: Optional[Union[(str, List[str])]]=None, predictions: Optional[Union[(str, List[str])]]=None, prior: Optional[Union[(str, List[str])]]=None, prior_predictive: Optional[Union[(str, List[str])]]=None, observed_data: Optional[str]=None, observed_data_var: Optional[Union[(str, List[str])]]=None, constant_data: Optional[str]=None, constant_data_var: Optional[Union[(str, List[str])]]=None, predictions_constant_data: Optional[str]=None, predictions_constant_data_var: Optional[Union[(str, List[str])]]=None, log_likelihood: Optional[Union[(str, List[str])]]=None, index_origin: Optional[int]=None, coords: Optional[CoordSpec]=None, dims: Optional[DimSpec]=None, disable_glob: Optional[bool]=False, save_warmup: Optional[bool]=None, dtypes: Optional[Dict]=None) -> InferenceData:
'Convert CmdStan data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_cmdstan <creating_InferenceData>`\n\n Parameters\n ----------\n posterior : str or list of str, optional\n List of paths to output.csv files.\n posterior_predictive : str or list of str, optional\n Posterior predictive samples for the fit. If endswith ".csv" assumes file.\n predictions : str or list of str, optional\n Out of sample predictions samples for the fit. If endswith ".csv" assumes file.\n prior : str or list of str, optional\n List of paths to output.csv files\n prior_predictive : str or list of str, optional\n Prior predictive samples for the fit. If endswith ".csv" assumes file.\n observed_data : str, optional\n Observed data used in the sampling. Path to data file in Rdump or JSON format.\n observed_data_var : str or list of str, optional\n Variable(s) used for slicing observed_data. If not defined, all\n data variables are imported.\n constant_data : str, optional\n Constant data used in the sampling. Path to data file in Rdump or JSON format.\n constant_data_var : str or list of str, optional\n Variable(s) used for slicing constant_data. If not defined, all\n data variables are imported.\n predictions_constant_data : str, optional\n Constant data for predictions used in the sampling.\n Path to data file in Rdump or JSON format.\n predictions_constant_data_var : str or list of str, optional\n Variable(s) used for slicing predictions_constant_data.\n If not defined, all data variables are imported.\n log_likelihood : str or list of str, optional\n Pointwise log_likelihood for the data.\n index_origin : int, optional\n Starting value of integer coordinate values. Defaults to the value in rcParam\n ``data.index_origin``.\n coords : dict of {str: array_like}, optional\n A dictionary containing the values that are used as index. The key\n is the name of the dimension, the values are the index values.\n dims : dict of {str: list of str}, optional\n A mapping from variables to a list of coordinate names for the variable.\n disable_glob : bool\n Don\'t use glob for string input. This means that all string input is\n assumed to be variable names (samples) or a path (data).\n save_warmup : bool\n Save warmup iterations into InferenceData object, if found in the input files.\n If not defined, use default defined by the rcParams.\n dtypes : dict or str\n A dictionary containing dtype information (int, float) for parameters.\n If input is a string, it is assumed to be a model code or path to model code file.\n\n Returns\n -------\n InferenceData object\n '
return CmdStanConverter(posterior=posterior, posterior_predictive=posterior_predictive, predictions=predictions, prior=prior, prior_predictive=prior_predictive, observed_data=observed_data, observed_data_var=observed_data_var, constant_data=constant_data, constant_data_var=constant_data_var, predictions_constant_data=predictions_constant_data, predictions_constant_data_var=predictions_constant_data_var, log_likelihood=log_likelihood, index_origin=index_origin, coords=coords, dims=dims, disable_glob=disable_glob, save_warmup=save_warmup, dtypes=dtypes).to_inference_data()<|docstring|>Convert CmdStan data into an InferenceData object.
For a usage example read the
:ref:`Creating InferenceData section on from_cmdstan <creating_InferenceData>`
Parameters
----------
posterior : str or list of str, optional
List of paths to output.csv files.
posterior_predictive : str or list of str, optional
Posterior predictive samples for the fit. If endswith ".csv" assumes file.
predictions : str or list of str, optional
Out of sample predictions samples for the fit. If endswith ".csv" assumes file.
prior : str or list of str, optional
List of paths to output.csv files
prior_predictive : str or list of str, optional
Prior predictive samples for the fit. If endswith ".csv" assumes file.
observed_data : str, optional
Observed data used in the sampling. Path to data file in Rdump or JSON format.
observed_data_var : str or list of str, optional
Variable(s) used for slicing observed_data. If not defined, all
data variables are imported.
constant_data : str, optional
Constant data used in the sampling. Path to data file in Rdump or JSON format.
constant_data_var : str or list of str, optional
Variable(s) used for slicing constant_data. If not defined, all
data variables are imported.
predictions_constant_data : str, optional
Constant data for predictions used in the sampling.
Path to data file in Rdump or JSON format.
predictions_constant_data_var : str or list of str, optional
Variable(s) used for slicing predictions_constant_data.
If not defined, all data variables are imported.
log_likelihood : str or list of str, optional
Pointwise log_likelihood for the data.
index_origin : int, optional
Starting value of integer coordinate values. Defaults to the value in rcParam
``data.index_origin``.
coords : dict of {str: array_like}, optional
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict of {str: list of str}, optional
A mapping from variables to a list of coordinate names for the variable.
disable_glob : bool
Don't use glob for string input. This means that all string input is
assumed to be variable names (samples) or a path (data).
save_warmup : bool
Save warmup iterations into InferenceData object, if found in the input files.
If not defined, use default defined by the rcParams.
dtypes : dict or str
A dictionary containing dtype information (int, float) for parameters.
If input is a string, it is assumed to be a model code or path to model code file.
Returns
-------
InferenceData object<|endoftext|> |
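A hedged usage sketch of the converter documented above; the file names, variable names, coords, and dims are placeholders, and real CmdStan output files would be needed for this to actually run:
import arviz as az

idata = az.from_cmdstan(
    posterior="output_*.csv",          # glob over the chain csv files
    posterior_predictive="y_hat",      # a generated-quantities column inside those files
    observed_data="data.json",
    observed_data_var="y",
    coords={"school": ["A", "B", "C"]},
    dims={"theta": ["school"], "y_hat": ["school"], "y": ["school"]},
)
print(idata)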
3c8044a03e0fe299f6e8a0107e99bbb79f72c723316c363d70184f7468886497 | @requires('posterior_')
def _parse_posterior(self):
'Read csv paths to list of ndarrays.'
paths = self.posterior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.posterior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.posterior_columns = columns['sample_columns']
self.sample_stats_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs = attrs | Read csv paths to list of ndarrays. | arviz/data/io_cmdstan.py | _parse_posterior | peterroelants/arviz | 1,159 | python | @requires('posterior_')
def _parse_posterior(self):
paths = self.posterior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.posterior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.posterior_columns = columns['sample_columns']
self.sample_stats_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs = attrs | @requires('posterior_')
def _parse_posterior(self):
paths = self.posterior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.posterior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.posterior_columns = columns['sample_columns']
self.sample_stats_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs = attrs<|docstring|>Read csv paths to list of ndarrays.<|endoftext|> |
db32f2268981240615f38a256367a8891bd132d46f39815f7757128f98717e4d | @requires('prior_')
def _parse_prior(self):
'Read csv paths to list of ndarrays.'
paths = self.prior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.prior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.prior_columns = columns['sample_columns']
self.sample_stats_prior_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs_prior = attrs | Read csv paths to list of ndarrays. | arviz/data/io_cmdstan.py | _parse_prior | peterroelants/arviz | 1,159 | python | @requires('prior_')
def _parse_prior(self):
paths = self.prior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.prior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.prior_columns = columns['sample_columns']
self.sample_stats_prior_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs_prior = attrs | @requires('prior_')
def _parse_prior(self):
paths = self.prior_
if isinstance(paths, str):
paths = [paths]
chain_data = []
columns = None
for path in paths:
output_data = _read_output(path)
chain_data.append(output_data)
if (columns is None):
columns = output_data
self.prior = ([item['sample'] for item in chain_data], [item['sample_warmup'] for item in chain_data])
self.prior_columns = columns['sample_columns']
self.sample_stats_prior_columns = columns['sample_stats_columns']
attrs = {}
for item in chain_data:
for (key, value) in item['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
self.attrs_prior = attrs<|docstring|>Read csv paths to list of ndarrays.<|endoftext|> |
f7fb10783911433b245bb63f57813d30b25b90eb8edc3b60a4c99b2cc88a7c31 | @requires('posterior')
def posterior_to_xarray(self):
'Extract posterior samples from output csv.'
columns = self.posterior_columns
posterior_predictive = self.posterior_predictive
if ((posterior_predictive is None) or (isinstance(posterior_predictive, str) and posterior_predictive.lower().endswith('.csv'))):
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [col for col in columns if (posterior_predictive == col.split('.')[0])]
else:
posterior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in posterior_predictive))]
predictions = self.predictions
if ((predictions is None) or (isinstance(predictions, str) and predictions.lower().endswith('.csv'))):
predictions = []
elif isinstance(predictions, str):
predictions = [col for col in columns if (predictions == col.split('.')[0])]
else:
predictions = [col for col in columns if any(((item == col.split('.')[0]) for item in predictions))]
log_likelihood = self.log_likelihood
if ((log_likelihood is None) or (isinstance(log_likelihood, str) and log_likelihood.lower().endswith('.csv'))):
log_likelihood = []
elif isinstance(log_likelihood, str):
log_likelihood = [col for col in columns if (log_likelihood == col.split('.')[0])]
else:
log_likelihood = [col for col in columns if any(((item == col.split('.')[0]) for item in log_likelihood))]
invalid_cols = ((posterior_predictive + predictions) + log_likelihood)
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.posterior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin)) | Extract posterior samples from output csv. | arviz/data/io_cmdstan.py | posterior_to_xarray | peterroelants/arviz | 1,159 | python | @requires('posterior')
def posterior_to_xarray(self):
columns = self.posterior_columns
posterior_predictive = self.posterior_predictive
if ((posterior_predictive is None) or (isinstance(posterior_predictive, str) and posterior_predictive.lower().endswith('.csv'))):
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [col for col in columns if (posterior_predictive == col.split('.')[0])]
else:
posterior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in posterior_predictive))]
predictions = self.predictions
if ((predictions is None) or (isinstance(predictions, str) and predictions.lower().endswith('.csv'))):
predictions = []
elif isinstance(predictions, str):
predictions = [col for col in columns if (predictions == col.split('.')[0])]
else:
predictions = [col for col in columns if any(((item == col.split('.')[0]) for item in predictions))]
log_likelihood = self.log_likelihood
if ((log_likelihood is None) or (isinstance(log_likelihood, str) and log_likelihood.lower().endswith('.csv'))):
log_likelihood = []
elif isinstance(log_likelihood, str):
log_likelihood = [col for col in columns if (log_likelihood == col.split('.')[0])]
else:
log_likelihood = [col for col in columns if any(((item == col.split('.')[0]) for item in log_likelihood))]
invalid_cols = ((posterior_predictive + predictions) + log_likelihood)
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.posterior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin)) | @requires('posterior')
def posterior_to_xarray(self):
columns = self.posterior_columns
posterior_predictive = self.posterior_predictive
if ((posterior_predictive is None) or (isinstance(posterior_predictive, str) and posterior_predictive.lower().endswith('.csv'))):
posterior_predictive = []
elif isinstance(posterior_predictive, str):
posterior_predictive = [col for col in columns if (posterior_predictive == col.split('.')[0])]
else:
posterior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in posterior_predictive))]
predictions = self.predictions
if ((predictions is None) or (isinstance(predictions, str) and predictions.lower().endswith('.csv'))):
predictions = []
elif isinstance(predictions, str):
predictions = [col for col in columns if (predictions == col.split('.')[0])]
else:
predictions = [col for col in columns if any(((item == col.split('.')[0]) for item in predictions))]
log_likelihood = self.log_likelihood
if ((log_likelihood is None) or (isinstance(log_likelihood, str) and log_likelihood.lower().endswith('.csv'))):
log_likelihood = []
elif isinstance(log_likelihood, str):
log_likelihood = [col for col in columns if (log_likelihood == col.split('.')[0])]
else:
log_likelihood = [col for col in columns if any(((item == col.split('.')[0]) for item in log_likelihood))]
invalid_cols = ((posterior_predictive + predictions) + log_likelihood)
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.posterior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs, index_origin=self.index_origin))<|docstring|>Extract posterior samples from output csv.<|endoftext|> |
55c21975ddc1138b262ada3dc4a554e09a94521d63b8c0e698a869ece4abc1ed | @requires('posterior')
@requires('sample_stats_columns')
def sample_stats_to_xarray(self):
'Extract sample_stats from fit.'
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin)) | Extract sample_stats from fit. | arviz/data/io_cmdstan.py | sample_stats_to_xarray | peterroelants/arviz | 1,159 | python | @requires('posterior')
@requires('sample_stats_columns')
def sample_stats_to_xarray(self):
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin)) | @requires('posterior')
@requires('sample_stats_columns')
def sample_stats_to_xarray(self):
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin))<|docstring|>Extract sample_stats from fit.<|endoftext|> |
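The method above strips the trailing double underscore from CmdStan's sampler-statistics columns and renames them to ArviZ conventions; a minimal sketch of just that renaming step with an invented column set:
import re

rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth',
               'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
sample_stats_columns = {'lp__': 0, 'accept_stat__': 1, 'n_leapfrog__': 4, 'divergent__': 5}  # invented

columns_new = {}
for key, idx in sample_stats_columns.items():
    name = re.sub('__$', '', key)            # drop the trailing double underscore
    columns_new[rename_dict.get(name, name)] = idx
print(columns_new)   # {'lp': 0, 'acceptance_rate': 1, 'n_steps': 4, 'diverging': 5}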
ea95996106ef533ebbbd9c4d8be89962bc66bdb6699f8a0bf3e970d402b1caaa | @requires('posterior')
@requires('posterior_predictive')
def posterior_predictive_to_xarray(self):
'Convert posterior_predictive samples to xarray.'
posterior_predictive = self.posterior_predictive
if ((isinstance(posterior_predictive, (tuple, list)) and posterior_predictive[0].endswith('.csv')) or (isinstance(posterior_predictive, str) and posterior_predictive.endswith('.csv'))):
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in posterior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in posterior_predictive))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | Convert posterior_predictive samples to xarray. | arviz/data/io_cmdstan.py | posterior_predictive_to_xarray | peterroelants/arviz | 1,159 | python | @requires('posterior')
@requires('posterior_predictive')
def posterior_predictive_to_xarray(self):
posterior_predictive = self.posterior_predictive
if ((isinstance(posterior_predictive, (tuple, list)) and posterior_predictive[0].endswith('.csv')) or (isinstance(posterior_predictive, str) and posterior_predictive.endswith('.csv'))):
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in posterior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in posterior_predictive))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | @requires('posterior')
@requires('posterior_predictive')
def posterior_predictive_to_xarray(self):
posterior_predictive = self.posterior_predictive
if ((isinstance(posterior_predictive, (tuple, list)) and posterior_predictive[0].endswith('.csv')) or (isinstance(posterior_predictive, str) and posterior_predictive.endswith('.csv'))):
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in posterior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(posterior_predictive, str):
posterior_predictive = [posterior_predictive]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in posterior_predictive))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin))<|docstring|>Convert posterior_predictive samples to xarray.<|endoftext|> |
47b04e81d97824841e5c14fdc77c9caad6f3c9f8789a879ce397150ea5206930 | @requires('posterior')
@requires('predictions')
def predictions_to_xarray(self):
'Convert out of sample predictions samples to xarray.'
predictions = self.predictions
if ((isinstance(predictions, (tuple, list)) and predictions[0].endswith('.csv')) or (isinstance(predictions, str) and predictions.endswith('.csv'))):
if isinstance(predictions, str):
predictions = [predictions]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in predictions:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(predictions, str):
predictions = [predictions]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in predictions))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | Convert out of sample predictions samples to xarray. | arviz/data/io_cmdstan.py | predictions_to_xarray | peterroelants/arviz | 1,159 | python | @requires('posterior')
@requires('predictions')
def predictions_to_xarray(self):
predictions = self.predictions
if ((isinstance(predictions, (tuple, list)) and predictions[0].endswith('.csv')) or (isinstance(predictions, str) and predictions.endswith('.csv'))):
if isinstance(predictions, str):
predictions = [predictions]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in predictions:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(predictions, str):
predictions = [predictions]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in predictions))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | @requires('posterior')
@requires('predictions')
def predictions_to_xarray(self):
predictions = self.predictions
if ((isinstance(predictions, (tuple, list)) and predictions[0].endswith('.csv')) or (isinstance(predictions, str) and predictions.endswith('.csv'))):
if isinstance(predictions, str):
predictions = [predictions]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in predictions:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(predictions, str):
predictions = [predictions]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in predictions))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin))<|docstring|>Convert out of sample predictions samples to xarray.<|endoftext|> |
b52321d6149f894424dc62d1d7dfa0a798690523d2d2d02069b08a92d4e6ab0a | @requires('posterior')
@requires('log_likelihood')
def log_likelihood_to_xarray(self):
'Convert elementwise log_likelihood samples to xarray.'
log_likelihood = self.log_likelihood
if ((isinstance(log_likelihood, (tuple, list)) and log_likelihood[0].endswith('.csv')) or (isinstance(log_likelihood, str) and log_likelihood.endswith('.csv'))):
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in log_likelihood:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in log_likelihood))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True)) | Convert elementwise log_likelihood samples to xarray. | arviz/data/io_cmdstan.py | log_likelihood_to_xarray | peterroelants/arviz | 1,159 | python | @requires('posterior')
@requires('log_likelihood')
def log_likelihood_to_xarray(self):
log_likelihood = self.log_likelihood
if ((isinstance(log_likelihood, (tuple, list)) and log_likelihood[0].endswith('.csv')) or (isinstance(log_likelihood, str) and log_likelihood.endswith('.csv'))):
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in log_likelihood:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in log_likelihood))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True)) | @requires('posterior')
@requires('log_likelihood')
def log_likelihood_to_xarray(self):
log_likelihood = self.log_likelihood
if ((isinstance(log_likelihood, (tuple, list)) and log_likelihood[0].endswith('.csv')) or (isinstance(log_likelihood, str) and log_likelihood.endswith('.csv'))):
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in log_likelihood:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(log_likelihood, str):
log_likelihood = [log_likelihood]
columns = {col: idx for (col, idx) in self.posterior_columns.items() if any(((item == col.split('.')[0]) for item in log_likelihood))}
data = _unpack_ndarrays(self.posterior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin, skip_event_dims=True))<|docstring|>Convert elementwise log_likelihood samples to xarray.<|endoftext|> |
4b5dadc42180b604db2281dbac74c267f9dd108fd0311c00cac70a7b3e70508e | @requires('prior')
def prior_to_xarray(self):
'Convert prior samples to xarray.'
prior_predictive = self.prior_predictive
columns = self.prior_columns
if ((prior_predictive is None) or (isinstance(prior_predictive, str) and prior_predictive.lower().endswith('.csv'))):
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [col for col in columns if (prior_predictive == col.split('.')[0])]
else:
prior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in prior_predictive))]
invalid_cols = prior_predictive
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.prior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin)) | Convert prior samples to xarray. | arviz/data/io_cmdstan.py | prior_to_xarray | peterroelants/arviz | 1,159 | python | @requires('prior')
def prior_to_xarray(self):
prior_predictive = self.prior_predictive
columns = self.prior_columns
if ((prior_predictive is None) or (isinstance(prior_predictive, str) and prior_predictive.lower().endswith('.csv'))):
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [col for col in columns if (prior_predictive == col.split('.')[0])]
else:
prior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in prior_predictive))]
invalid_cols = prior_predictive
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.prior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin)) | @requires('prior')
def prior_to_xarray(self):
prior_predictive = self.prior_predictive
columns = self.prior_columns
if ((prior_predictive is None) or (isinstance(prior_predictive, str) and prior_predictive.lower().endswith('.csv'))):
prior_predictive = []
elif isinstance(prior_predictive, str):
prior_predictive = [col for col in columns if (prior_predictive == col.split('.')[0])]
else:
prior_predictive = [col for col in columns if any(((item == col.split('.')[0]) for item in prior_predictive))]
invalid_cols = prior_predictive
valid_cols = {col: idx for (col, idx) in columns.items() if (col not in invalid_cols)}
data = _unpack_ndarrays(self.prior[0], valid_cols, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], valid_cols, self.dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=self.attrs_prior, index_origin=self.index_origin))<|docstring|>Convert prior samples to xarray.<|endoftext|> |
19194b99c5d8a4fb6a6f37c7b1bba3e720c384d4c829825468e076f50de85b71 | @requires('prior')
@requires('sample_stats_prior_columns')
def sample_stats_prior_to_xarray(self):
'Extract sample_stats from fit.'
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_prior_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin)) | Extract sample_stats from fit. | arviz/data/io_cmdstan.py | sample_stats_prior_to_xarray | peterroelants/arviz | 1,159 | python | @requires('prior')
@requires('sample_stats_prior_columns')
def sample_stats_prior_to_xarray(self):
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_prior_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin)) | @requires('prior')
@requires('sample_stats_prior_columns')
def sample_stats_prior_to_xarray(self):
dtypes = {'diverging': bool, 'n_steps': np.int64, 'tree_depth': np.int64, **self.dtypes}
rename_dict = {'divergent': 'diverging', 'n_leapfrog': 'n_steps', 'treedepth': 'tree_depth', 'stepsize': 'step_size', 'accept_stat': 'acceptance_rate'}
columns_new = {}
for (key, idx) in self.sample_stats_prior_columns.items():
name = re.sub('__$', '', key)
name = rename_dict.get(name, name)
columns_new[name] = idx
data = _unpack_ndarrays(self.posterior[0], columns_new, dtypes)
data_warmup = _unpack_ndarrays(self.posterior[1], columns_new, dtypes)
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs={item: key for (key, item) in rename_dict.items()}, index_origin=self.index_origin))<|docstring|>Extract sample_stats from fit.<|endoftext|> |
73dde1a0c88fe45749f58d58748a1984422afc69fec024bef11aaacc286103fa | @requires('prior')
@requires('prior_predictive')
def prior_predictive_to_xarray(self):
'Convert prior_predictive samples to xarray.'
prior_predictive = self.prior_predictive
if ((isinstance(prior_predictive, (tuple, list)) and prior_predictive[0].endswith('.csv')) or (isinstance(prior_predictive, str) and prior_predictive.endswith('.csv'))):
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in prior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
columns = {col: idx for (col, idx) in self.prior_columns.items() if any(((item == col.split('.')[0]) for item in prior_predictive))}
data = _unpack_ndarrays(self.prior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | Convert prior_predictive samples to xarray. | arviz/data/io_cmdstan.py | prior_predictive_to_xarray | peterroelants/arviz | 1,159 | python | @requires('prior')
@requires('prior_predictive')
def prior_predictive_to_xarray(self):
prior_predictive = self.prior_predictive
if ((isinstance(prior_predictive, (tuple, list)) and prior_predictive[0].endswith('.csv')) or (isinstance(prior_predictive, str) and prior_predictive.endswith('.csv'))):
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in prior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
columns = {col: idx for (col, idx) in self.prior_columns.items() if any(((item == col.split('.')[0]) for item in prior_predictive))}
data = _unpack_ndarrays(self.prior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin)) | @requires('prior')
@requires('prior_predictive')
def prior_predictive_to_xarray(self):
prior_predictive = self.prior_predictive
if ((isinstance(prior_predictive, (tuple, list)) and prior_predictive[0].endswith('.csv')) or (isinstance(prior_predictive, str) and prior_predictive.endswith('.csv'))):
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
chain_data = []
chain_data_warmup = []
columns = None
attrs = {}
for path in prior_predictive:
parsed_output = _read_output(path)
chain_data.append(parsed_output['sample'])
chain_data_warmup.append(parsed_output['sample_warmup'])
if (columns is None):
columns = parsed_output['sample_columns']
for (key, value) in parsed_output['configuration_info'].items():
if (key not in attrs):
attrs[key] = []
attrs[key].append(value)
data = _unpack_ndarrays(chain_data, columns, self.dtypes)
data_warmup = _unpack_ndarrays(chain_data_warmup, columns, self.dtypes)
else:
if isinstance(prior_predictive, str):
prior_predictive = [prior_predictive]
columns = {col: idx for (col, idx) in self.prior_columns.items() if any(((item == col.split('.')[0]) for item in prior_predictive))}
data = _unpack_ndarrays(self.prior[0], columns, self.dtypes)
data_warmup = _unpack_ndarrays(self.prior[1], columns, self.dtypes)
attrs = None
return (dict_to_dataset(data, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin), dict_to_dataset(data_warmup, coords=self.coords, dims=self.dims, attrs=attrs, index_origin=self.index_origin))<|docstring|>Convert prior_predictive samples to xarray.<|endoftext|> |
91542c2ec4e891dcffbbe80e908cc3c295df1b93a1d1ffb421979bbd47da8e6c | @requires('observed_data')
def observed_data_to_xarray(self):
'Convert observed data to xarray.'
observed_data_raw = _read_data(self.observed_data)
variables = self.observed_data_var
if isinstance(variables, str):
variables = [variables]
observed_data = {}
for (key, vals) in observed_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
observed_data[key] = utils.one_de(vals)
return dict_to_dataset(observed_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | Convert observed data to xarray. | arviz/data/io_cmdstan.py | observed_data_to_xarray | peterroelants/arviz | 1,159 | python | @requires('observed_data')
def observed_data_to_xarray(self):
observed_data_raw = _read_data(self.observed_data)
variables = self.observed_data_var
if isinstance(variables, str):
variables = [variables]
observed_data = {}
for (key, vals) in observed_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
observed_data[key] = utils.one_de(vals)
return dict_to_dataset(observed_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | @requires('observed_data')
def observed_data_to_xarray(self):
observed_data_raw = _read_data(self.observed_data)
variables = self.observed_data_var
if isinstance(variables, str):
variables = [variables]
observed_data = {}
for (key, vals) in observed_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
observed_data[key] = utils.one_de(vals)
return dict_to_dataset(observed_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin)<|docstring|>Convert observed data to xarray.<|endoftext|> |
508a061c72eb965e2828ead6bd8e0ff11b9afa4e82d37ff9e8f8a8bab3ae00ed | @requires('constant_data')
def constant_data_to_xarray(self):
'Convert constant data to xarray.'
constant_data_raw = _read_data(self.constant_data)
variables = self.constant_data_var
if isinstance(variables, str):
variables = [variables]
constant_data = {}
for (key, vals) in constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
constant_data[key] = utils.one_de(vals)
return dict_to_dataset(constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | Convert constant data to xarray. | arviz/data/io_cmdstan.py | constant_data_to_xarray | peterroelants/arviz | 1,159 | python | @requires('constant_data')
def constant_data_to_xarray(self):
constant_data_raw = _read_data(self.constant_data)
variables = self.constant_data_var
if isinstance(variables, str):
variables = [variables]
constant_data = {}
for (key, vals) in constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
constant_data[key] = utils.one_de(vals)
return dict_to_dataset(constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | @requires('constant_data')
def constant_data_to_xarray(self):
constant_data_raw = _read_data(self.constant_data)
variables = self.constant_data_var
if isinstance(variables, str):
variables = [variables]
constant_data = {}
for (key, vals) in constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
constant_data[key] = utils.one_de(vals)
return dict_to_dataset(constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin)<|docstring|>Convert constant data to xarray.<|endoftext|> |
22c6d625e9440cccae938712f3b68ae0ceebf24d5d0057f0ec7b2b30c7088372 | @requires('predictions_constant_data')
def predictions_constant_data_to_xarray(self):
'Convert predictions constant data to xarray.'
predictions_constant_data_raw = _read_data(self.predictions_constant_data)
variables = self.predictions_constant_data_var
if isinstance(variables, str):
variables = [variables]
predictions_constant_data = {}
for (key, vals) in predictions_constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
vals = utils.one_de(vals)
predictions_constant_data[key] = utils.one_de(vals)
return dict_to_dataset(predictions_constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | Convert predictions constant data to xarray. | arviz/data/io_cmdstan.py | predictions_constant_data_to_xarray | peterroelants/arviz | 1,159 | python | @requires('predictions_constant_data')
def predictions_constant_data_to_xarray(self):
predictions_constant_data_raw = _read_data(self.predictions_constant_data)
variables = self.predictions_constant_data_var
if isinstance(variables, str):
variables = [variables]
predictions_constant_data = {}
for (key, vals) in predictions_constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
vals = utils.one_de(vals)
predictions_constant_data[key] = utils.one_de(vals)
return dict_to_dataset(predictions_constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin) | @requires('predictions_constant_data')
def predictions_constant_data_to_xarray(self):
predictions_constant_data_raw = _read_data(self.predictions_constant_data)
variables = self.predictions_constant_data_var
if isinstance(variables, str):
variables = [variables]
predictions_constant_data = {}
for (key, vals) in predictions_constant_data_raw.items():
if ((variables is not None) and (key not in variables)):
continue
vals = utils.one_de(vals)
predictions_constant_data[key] = utils.one_de(vals)
return dict_to_dataset(predictions_constant_data, coords=self.coords, dims=self.dims, default_dims=[], index_origin=self.index_origin)<|docstring|>Convert predictions constant data to xarray.<|endoftext|> |
833bfd240c4db4529af7e55f3ab52c658cce1d69b3cdd079e1cf9e1b80a82e03 | def to_inference_data(self):
'Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (i.e., there is no `output`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n '
return InferenceData(save_warmup=self.save_warmup, **{'posterior': self.posterior_to_xarray(), 'sample_stats': self.sample_stats_to_xarray(), 'log_likelihood': self.log_likelihood_to_xarray(), 'posterior_predictive': self.posterior_predictive_to_xarray(), 'prior': self.prior_to_xarray(), 'sample_stats_prior': self.sample_stats_prior_to_xarray(), 'prior_predictive': self.prior_predictive_to_xarray(), 'observed_data': self.observed_data_to_xarray(), 'constant_data': self.constant_data_to_xarray(), 'predictions': self.predictions_to_xarray(), 'predictions_constant_data': self.predictions_constant_data_to_xarray()}) | Convert all available data to an InferenceData object.
Note that if groups can not be created (i.e., there is no `output`, so
the `posterior` and `sample_stats` can not be extracted), then the InferenceData
will not have those groups. | arviz/data/io_cmdstan.py | to_inference_data | peterroelants/arviz | 1,159 | python | def to_inference_data(self):
'Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (i.e., there is no `output`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n '
return InferenceData(save_warmup=self.save_warmup, **{'posterior': self.posterior_to_xarray(), 'sample_stats': self.sample_stats_to_xarray(), 'log_likelihood': self.log_likelihood_to_xarray(), 'posterior_predictive': self.posterior_predictive_to_xarray(), 'prior': self.prior_to_xarray(), 'sample_stats_prior': self.sample_stats_prior_to_xarray(), 'prior_predictive': self.prior_predictive_to_xarray(), 'observed_data': self.observed_data_to_xarray(), 'constant_data': self.constant_data_to_xarray(), 'predictions': self.predictions_to_xarray(), 'predictions_constant_data': self.predictions_constant_data_to_xarray()}) | def to_inference_data(self):
'Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (i.e., there is no `output`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n '
return InferenceData(save_warmup=self.save_warmup, **{'posterior': self.posterior_to_xarray(), 'sample_stats': self.sample_stats_to_xarray(), 'log_likelihood': self.log_likelihood_to_xarray(), 'posterior_predictive': self.posterior_predictive_to_xarray(), 'prior': self.prior_to_xarray(), 'sample_stats_prior': self.sample_stats_prior_to_xarray(), 'prior_predictive': self.prior_predictive_to_xarray(), 'observed_data': self.observed_data_to_xarray(), 'constant_data': self.constant_data_to_xarray(), 'predictions': self.predictions_to_xarray(), 'predictions_constant_data': self.predictions_constant_data_to_xarray()})<|docstring|>Convert all available data to an InferenceData object.
Note that if groups can not be created (i.e., there is no `output`, so
the `posterior` and `sample_stats` can not be extracted), then the InferenceData
will not have those groups.<|endoftext|> |
7f874e196083d49b669e1d3d11c2a7403235769ccafa9d36ff788e8d3874f3f4 | def get_env_var(key, as_type, env):
'Get the environment variable option.\n\n :param key: the config key requested\n :param as_type: the type we would like to convert it to\n :param env: environment variables to use\n :return:\n '
environ_key = ensure_str('VIRTUALENV_{}'.format(key.upper()))
if env.get(environ_key):
value = env[environ_key]
try:
source = 'env var {}'.format(ensure_text(environ_key))
as_type = convert(value, as_type, source)
return (as_type, source)
except Exception:
pass | Get the environment variable option.
:param key: the config key requested
:param as_type: the type we would like to convert it to
:param env: environment variables to use
:return: | .venv/lib/python3.8/site-packages/virtualenv/config/env_var.py | get_env_var | RivtLib/replit01 | 3,522 | python | def get_env_var(key, as_type, env):
'Get the environment variable option.\n\n :param key: the config key requested\n :param as_type: the type we would like to convert it to\n :param env: environment variables to use\n :return:\n '
environ_key = ensure_str('VIRTUALENV_{}'.format(key.upper()))
if env.get(environ_key):
value = env[environ_key]
try:
source = 'env var {}'.format(ensure_text(environ_key))
as_type = convert(value, as_type, source)
return (as_type, source)
except Exception:
pass | def get_env_var(key, as_type, env):
'Get the environment variable option.\n\n :param key: the config key requested\n :param as_type: the type we would like to convert it to\n :param env: environment variables to use\n :return:\n '
environ_key = ensure_str('VIRTUALENV_{}'.format(key.upper()))
if env.get(environ_key):
value = env[environ_key]
try:
source = 'env var {}'.format(ensure_text(environ_key))
as_type = convert(value, as_type, source)
return (as_type, source)
except Exception:
pass<|docstring|>Get the environment variable option.
:param key: the config key requested
:param as_type: the type we would like to convert it to
:param env: environment variables to use
:return:<|endoftext|> |
482cf689cbd474eff83da7295bae1ccb1968a99958a6a4d7ce0cb0324a9bed2a | def on_hover(self, event):
'Altera o ícone do botão quando o cursor entra em sua área.\n\n Se o botão possuí texto de instrução, ele é exibido na barra\n inferior.\n '
if (self['state'] == 'normal'):
self.configure(image=self.icon2)
if (self.hover_text is not None):
cetus.hover_box.configure(text=self.hover_text) | Altera o ícone do botão quando o cursor entra em sua área.
Se o botão possuí texto de instrução, ele é exibido na barra
inferior. | interface.py | on_hover | WilsonCazarre/ProjetoCetus | 4 | python | def on_hover(self, event):
'Altera o ícone do botão quando o cursor entra em sua área.\n\n Se o botão possuí texto de instrução, ele é exibido na barra\n inferior.\n '
if (self['state'] == 'normal'):
self.configure(image=self.icon2)
if (self.hover_text is not None):
cetus.hover_box.configure(text=self.hover_text) | def on_hover(self, event):
'Altera o ícone do botão quando o cursor entra em sua área.\n\n Se o botão possuí texto de instrução, ele é exibido na barra\n inferior.\n '
if (self['state'] == 'normal'):
self.configure(image=self.icon2)
if (self.hover_text is not None):
cetus.hover_box.configure(text=self.hover_text)<|docstring|>Altera o ícone do botão quando o cursor entra em sua área.
Se o botão possuí texto de instrução, ele é exibido na barra
inferior.<|endoftext|> |
6a44cab7145bb5b5888036ce66346ebb8c2fe70b2b733e28539a367c1a8c66d9 | def on_leave(self, event):
'Altera o ícone do botão quando o cursor saí da sua área.'
if (self['state'] == 'normal'):
self.configure(image=self.icon1)
cetus.hover_box.configure(text=std.hover_texts['default']) | Altera o ícone do botão quando o cursor saí da sua área. | interface.py | on_leave | WilsonCazarre/ProjetoCetus | 4 | python | def on_leave(self, event):
if (self['state'] == 'normal'):
self.configure(image=self.icon1)
cetus.hover_box.configure(text=std.hover_texts['default']) | def on_leave(self, event):
if (self['state'] == 'normal'):
self.configure(image=self.icon1)
cetus.hover_box.configure(text=std.hover_texts['default'])<|docstring|>Altera o ícone do botão quando o cursor saí da sua área.<|endoftext|> |
f7e3a3a3869a7f6ea80b6f8fc0b5ceb637b92961a370a0f4b63f85ca62006756 | def on_frame_configure(self, event):
'Reset the scroll region to encompass the inner frame'
self.canvas.configure(scrollregion=self.canvas.bbox('all')) | Reset the scroll region to encompass the inner frame | interface.py | on_frame_configure | WilsonCazarre/ProjetoCetus | 4 | python | def on_frame_configure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox('all')) | def on_frame_configure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox('all'))<|docstring|>Reset the scroll region to encompass the inner frame<|endoftext|> |
e2139413e0f81daeb1cbd9e6c20e8d4ff59cfda8b43c6f24f68245ea9b5456e3 | def check_if_is_connected(self):
'Função para verificar alterações na porta serial.\n\n Essa função roda em looping infinito no background da janela\n base.\n '
bt_connected = self.side_buttons['reconnect_icon']
if ((self._frame is not None) and arduino.is_connected):
bt_connected.icon1 = self.connected_icon
bt_connected.icon2 = self.connected_icon
bt_connected.configure(image=self.connected_icon)
elif arduino.waiting_update:
bt_connected.icon1 = tk.PhotoImage(file=std.side_buttons_path['reconnect_icon'])
bt_connected.icon2 = tk.PhotoImage(file=std.side_buttons_path['reconnect_highlight'])
bt_connected.configure(image=bt_connected.icon1)
arduino.waiting_update = False
self.after(1000, self.check_if_is_connected) | Função para verificar alterações na porta serial.
Essa função roda em looping infinito no background da janela
base. | interface.py | check_if_is_connected | WilsonCazarre/ProjetoCetus | 4 | python | def check_if_is_connected(self):
'Função para verificar alterações na porta serial.\n\n Essa função roda em looping infinito no background da janela\n base.\n '
bt_connected = self.side_buttons['reconnect_icon']
if ((self._frame is not None) and arduino.is_connected):
bt_connected.icon1 = self.connected_icon
bt_connected.icon2 = self.connected_icon
bt_connected.configure(image=self.connected_icon)
elif arduino.waiting_update:
bt_connected.icon1 = tk.PhotoImage(file=std.side_buttons_path['reconnect_icon'])
bt_connected.icon2 = tk.PhotoImage(file=std.side_buttons_path['reconnect_highlight'])
bt_connected.configure(image=bt_connected.icon1)
arduino.waiting_update = False
self.after(1000, self.check_if_is_connected) | def check_if_is_connected(self):
'Função para verificar alterações na porta serial.\n\n Essa função roda em looping infinito no background da janela\n base.\n '
bt_connected = self.side_buttons['reconnect_icon']
if ((self._frame is not None) and arduino.is_connected):
bt_connected.icon1 = self.connected_icon
bt_connected.icon2 = self.connected_icon
bt_connected.configure(image=self.connected_icon)
elif arduino.waiting_update:
bt_connected.icon1 = tk.PhotoImage(file=std.side_buttons_path['reconnect_icon'])
bt_connected.icon2 = tk.PhotoImage(file=std.side_buttons_path['reconnect_highlight'])
bt_connected.configure(image=bt_connected.icon1)
arduino.waiting_update = False
self.after(1000, self.check_if_is_connected)<|docstring|>Função para verificar alterações na porta serial.
Essa função roda em looping infinito no background da janela
base.<|endoftext|> |
9992634413209baca739b74b41af91631f1e14ebb9df9fa824ef2d2a33308ef3 | def close_window(self):
'Função para sobrescrever o protocolo padrão ao fechar a janela.\n\n O programa salva todos os experimentos em arquivo externo e depois\n destrói a janela principal encerrando o programa.\n '
fc.save_pickle_file(std.EXP_PATH, fc.experiments)
if arduino.is_connected:
arduino.is_running = False
arduino.is_connected = False
arduino.serial_device.close()
print('Closing serial port.')
self.destroy() | Função para sobrescrever o protocolo padrão ao fechar a janela.
O programa salva todos os experimentos em arquivo externo e depois
destrói a janela principal encerrando o programa. | interface.py | close_window | WilsonCazarre/ProjetoCetus | 4 | python | def close_window(self):
'Função para sobrescrever o protocolo padrão ao fechar a janela.\n\n O programa salva todos os experimentos em arquivo externo e depois\n destrói a janela principal encerrando o programa.\n '
fc.save_pickle_file(std.EXP_PATH, fc.experiments)
if arduino.is_connected:
arduino.is_running = False
arduino.is_connected = False
arduino.serial_device.close()
print('Closing serial port.')
self.destroy() | def close_window(self):
'Função para sobrescrever o protocolo padrão ao fechar a janela.\n\n O programa salva todos os experimentos em arquivo externo e depois\n destrói a janela principal encerrando o programa.\n '
fc.save_pickle_file(std.EXP_PATH, fc.experiments)
if arduino.is_connected:
arduino.is_running = False
arduino.is_connected = False
arduino.serial_device.close()
print('Closing serial port.')
self.destroy()<|docstring|>Função para sobrescrever o protocolo padrão ao fechar a janela.
O programa salva todos os experimentos em arquivo externo e depois
destrói a janela principal encerrando o programa.<|endoftext|> |
07fb34fcf7dedb4472ddb154b4fae11c7411a2c10391edf08424e0588d2d5742 | def switch_frame(self, new_frame, *args, **kwargs):
'Função para trocar o conteúdo exibido pela na janela.\n\n :param new_frame: nova classe ou subclasse da tk.Frame a ser\n exibida.\n '
new_frame = new_frame(self, *args, **kwargs)
if (self._frame is not None):
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
self._frame.create_widgets() | Função para trocar o conteúdo exibido pela na janela.
:param new_frame: nova classe ou subclasse da tk.Frame a ser
exibida. | interface.py | switch_frame | WilsonCazarre/ProjetoCetus | 4 | python | def switch_frame(self, new_frame, *args, **kwargs):
'Função para trocar o conteúdo exibido pela na janela.\n\n :param new_frame: nova classe ou subclasse da tk.Frame a ser\n exibida.\n '
new_frame = new_frame(self, *args, **kwargs)
if (self._frame is not None):
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
self._frame.create_widgets() | def switch_frame(self, new_frame, *args, **kwargs):
'Função para trocar o conteúdo exibido pela na janela.\n\n :param new_frame: nova classe ou subclasse da tk.Frame a ser\n exibida.\n '
new_frame = new_frame(self, *args, **kwargs)
if (self._frame is not None):
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
self._frame.create_widgets()<|docstring|>Função para trocar o conteúdo exibido pela na janela.
:param new_frame: nova classe ou subclasse da tk.Frame a ser
exibida.<|endoftext|> |
ad0493348c58cd0f4ca058c579084e8be7e50578606ad2a9f92305bda479cff7 | def _widgets(self):
'Cria os widgets específicos da janela.\n\n Esses widgets são colocados em outro método pois eles não podem\n herdados pelas outras janelas. O método é sobrescrito em cada\n nova sub-classe.\n '
self.logo = tk.PhotoImage(file=std.LOGO_IMAGE_PATH)
self.logo_bg = tk.Label(master=self, image=self.logo, bg=std.BG)
self.logo_bg.place(x=550, y=200)
self.buttons_frame = tk.Frame(master=self, width=850, height=120, bg=std.BG, bd=0, highlightcolor=std.BD, highlightbackground=std.BD, highlightthickness=std.BD_WIDTH)
self.buttons_frame.place(anchor='n', rely=0.3, relx=0.5, y=35)
self.buttons_frame.pack_propagate(False)
self.buttons = {}
for but in std.cetuspcr_buttons_path:
if ('_icon' in but):
self.path_slice = but.split('_')
self.b_name = self.path_slice[0]
self.path1 = std.cetuspcr_buttons_path[but]
self.path2 = std.cetuspcr_buttons_path[f'{self.path_slice[0]}_highlight']
self.new_button = AnimatedButton(master=self.buttons_frame, image1=self.path1, image2=self.path2, activebackground=std.BG, width=75, bd=0, bg=std.BG, highlightthickness=0, hover_text=std.hover_texts[self.b_name])
self.buttons[but] = self.new_button
self.buttons[but].pack(side='right', padx=8)
self.buttons['confirm_icon'].configure(command=self.handle_confirm_button)
self.buttons['add_icon'].configure(command=self.handle_new_button)
self.buttons['delete_icon'].configure(command=self.handle_delete_button)
self.experiment_combo = ttk.Combobox(master=self.buttons_frame, width=35, font=(std.FONT_TITLE, 17))
self.experiment_combo.place(rely=0.55, relx=0.02, anchor='w', bordermode='inside')
self.experiment_combo_title = tk.Label(master=self, font=(std.FONT_TITLE, 22, 'bold'), text='Selecione o experimento:', fg=std.TEXTS_COLOR, bg=std.BG)
self.experiment_combo_title.place(in_=self.experiment_combo, anchor='sw', bordermode='outside')
self.show_experiments() | Cria os widgets específicos da janela.
Esses widgets são colocados em outro método pois eles não podem
herdados pelas outras janelas. O método é sobrescrito em cada
nova sub-classe. | interface.py | _widgets | WilsonCazarre/ProjetoCetus | 4 | python | def _widgets(self):
'Cria os widgets específicos da janela.\n\n Esses widgets são colocados em outro método pois eles não podem\n herdados pelas outras janelas. O método é sobrescrito em cada\n nova sub-classe.\n '
self.logo = tk.PhotoImage(file=std.LOGO_IMAGE_PATH)
self.logo_bg = tk.Label(master=self, image=self.logo, bg=std.BG)
self.logo_bg.place(x=550, y=200)
self.buttons_frame = tk.Frame(master=self, width=850, height=120, bg=std.BG, bd=0, highlightcolor=std.BD, highlightbackground=std.BD, highlightthickness=std.BD_WIDTH)
self.buttons_frame.place(anchor='n', rely=0.3, relx=0.5, y=35)
self.buttons_frame.pack_propagate(False)
self.buttons = {}
for but in std.cetuspcr_buttons_path:
if ('_icon' in but):
self.path_slice = but.split('_')
self.b_name = self.path_slice[0]
self.path1 = std.cetuspcr_buttons_path[but]
self.path2 = std.cetuspcr_buttons_path[f'{self.path_slice[0]}_highlight']
self.new_button = AnimatedButton(master=self.buttons_frame, image1=self.path1, image2=self.path2, activebackground=std.BG, width=75, bd=0, bg=std.BG, highlightthickness=0, hover_text=std.hover_texts[self.b_name])
self.buttons[but] = self.new_button
self.buttons[but].pack(side='right', padx=8)
self.buttons['confirm_icon'].configure(command=self.handle_confirm_button)
self.buttons['add_icon'].configure(command=self.handle_new_button)
self.buttons['delete_icon'].configure(command=self.handle_delete_button)
self.experiment_combo = ttk.Combobox(master=self.buttons_frame, width=35, font=(std.FONT_TITLE, 17))
self.experiment_combo.place(rely=0.55, relx=0.02, anchor='w', bordermode='inside')
self.experiment_combo_title = tk.Label(master=self, font=(std.FONT_TITLE, 22, 'bold'), text='Selecione o experimento:', fg=std.TEXTS_COLOR, bg=std.BG)
self.experiment_combo_title.place(in_=self.experiment_combo, anchor='sw', bordermode='outside')
self.show_experiments() | def _widgets(self):
'Cria os widgets específicos da janela.\n\n Esses widgets são colocados em outro método pois eles não podem\n herdados pelas outras janelas. O método é sobrescrito em cada\n nova sub-classe.\n '
self.logo = tk.PhotoImage(file=std.LOGO_IMAGE_PATH)
self.logo_bg = tk.Label(master=self, image=self.logo, bg=std.BG)
self.logo_bg.place(x=550, y=200)
self.buttons_frame = tk.Frame(master=self, width=850, height=120, bg=std.BG, bd=0, highlightcolor=std.BD, highlightbackground=std.BD, highlightthickness=std.BD_WIDTH)
self.buttons_frame.place(anchor='n', rely=0.3, relx=0.5, y=35)
self.buttons_frame.pack_propagate(False)
self.buttons = {}
for but in std.cetuspcr_buttons_path:
if ('_icon' in but):
self.path_slice = but.split('_')
self.b_name = self.path_slice[0]
self.path1 = std.cetuspcr_buttons_path[but]
self.path2 = std.cetuspcr_buttons_path[f'{self.path_slice[0]}_highlight']
self.new_button = AnimatedButton(master=self.buttons_frame, image1=self.path1, image2=self.path2, activebackground=std.BG, width=75, bd=0, bg=std.BG, highlightthickness=0, hover_text=std.hover_texts[self.b_name])
self.buttons[but] = self.new_button
self.buttons[but].pack(side='right', padx=8)
self.buttons['confirm_icon'].configure(command=self.handle_confirm_button)
self.buttons['add_icon'].configure(command=self.handle_new_button)
self.buttons['delete_icon'].configure(command=self.handle_delete_button)
self.experiment_combo = ttk.Combobox(master=self.buttons_frame, width=35, font=(std.FONT_TITLE, 17))
self.experiment_combo.place(rely=0.55, relx=0.02, anchor='w', bordermode='inside')
self.experiment_combo_title = tk.Label(master=self, font=(std.FONT_TITLE, 22, 'bold'), text='Selecione o experimento:', fg=std.TEXTS_COLOR, bg=std.BG)
self.experiment_combo_title.place(in_=self.experiment_combo, anchor='sw', bordermode='outside')
self.show_experiments()<|docstring|>Cria os widgets específicos da janela.
Esses widgets são colocados em outro método pois eles não podem
herdados pelas outras janelas. O método é sobrescrito em cada
nova sub-classe.<|endoftext|> |
5c0464b9ef6d3ac56bdc6e1ec0bd8c4652f5fa6464a74d1f69e6d1ebd8c0cb51 | def show_experiments(self):
'Abre o arquivo com os experimentos salvos e os exibe na\n self.experiment_combo(ttk.Combobox).\n '
fc.experiments = fc.open_pickle_file(std.EXP_PATH)
values = []
for exp in fc.experiments:
values.append(exp.name)
self.experiment_combo.configure(values=values) | Abre o arquivo com os experimentos salvos e os exibe na
self.experiment_combo(ttk.Combobox). | interface.py | show_experiments | WilsonCazarre/ProjetoCetus | 4 | python | def show_experiments(self):
'Abre o arquivo com os experimentos salvos e os exibe na\n self.experiment_combo(ttk.Combobox).\n '
fc.experiments = fc.open_pickle_file(std.EXP_PATH)
values = []
for exp in fc.experiments:
values.append(exp.name)
self.experiment_combo.configure(values=values) | def show_experiments(self):
'Abre o arquivo com os experimentos salvos e os exibe na\n self.experiment_combo(ttk.Combobox).\n '
fc.experiments = fc.open_pickle_file(std.EXP_PATH)
values = []
for exp in fc.experiments:
values.append(exp.name)
self.experiment_combo.configure(values=values)<|docstring|>Abre o arquivo com os experimentos salvos e os exibe na
self.experiment_combo(ttk.Combobox).<|endoftext|> |
3df4e3aa24fb31342f6c96217ac94c4067226ca04a71c58aaff2337e049598f3 | def get_queryset(self):
'Return objects for the current authenticated user only'
assigned_only = bool(int(self.request.query_params.get('assigned_only', default=0)))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(user=self.request.user).order_by('-name').distinct() | Return objects for the current authenticated user only | app/recipe/views.py | get_queryset | onkarsherkar/recipe-app-api | 0 | python | def get_queryset(self):
assigned_only = bool(int(self.request.query_params.get('assigned_only', default=0)))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(user=self.request.user).order_by('-name').distinct() | def get_queryset(self):
assigned_only = bool(int(self.request.query_params.get('assigned_only', default=0)))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(user=self.request.user).order_by('-name').distinct()<|docstring|>Return objects for the current authenticated user only<|endoftext|> |
4614e6098a91c946ef3b6113794b48c8674af7ef2345225dfb785d7e5f0e7d05 | def perform_create(self, serializer):
'Create a new Object'
serializer.save(user=self.request.user) | Create a new Object | app/recipe/views.py | perform_create | onkarsherkar/recipe-app-api | 0 | python | def perform_create(self, serializer):
serializer.save(user=self.request.user) | def perform_create(self, serializer):
serializer.save(user=self.request.user)<|docstring|>Create a new Object<|endoftext|> |
43ee8889f6fb0929156361cb8a022ac11c0959d1e39d8013680e6519157f7968 | def _param_to_ints(self, qs):
'convert a list of string IDs to a list of integers'
return [int(str_id) for str_id in qs.split(',')] | convert a list of string IDs to a list of integers | app/recipe/views.py | _param_to_ints | onkarsherkar/recipe-app-api | 0 | python | def _param_to_ints(self, qs):
return [int(str_id) for str_id in qs.split(',')] | def _param_to_ints(self, qs):
return [int(str_id) for str_id in qs.split(',')]<|docstring|>convert a list of string IDs to a list of integers<|endoftext|> |
0eba2c1a87b2b1fb00b98a4dcf59965c7bb785c0f2585f36379ec89d12de137b | def get_queryset(self):
'Retrieve the recipe for the authenticated user'
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tags_ids = self._param_to_ints(tags)
queryset = queryset.filter(tags__id__in=tags_ids)
if ingredients:
ingredients_ids = self._param_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user) | Retrieve the recipe for the authenticated user | app/recipe/views.py | get_queryset | onkarsherkar/recipe-app-api | 0 | python | def get_queryset(self):
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tags_ids = self._param_to_ints(tags)
queryset = queryset.filter(tags__id__in=tags_ids)
if ingredients:
ingredients_ids = self._param_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user) | def get_queryset(self):
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tags_ids = self._param_to_ints(tags)
queryset = queryset.filter(tags__id__in=tags_ids)
if ingredients:
ingredients_ids = self._param_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user)<|docstring|>Retrieve the recipe for the authenticated user<|endoftext|> |
4076ebdb8163f12e4981ec551f41f541f35d49980d69979b22d7f50c69ee952a | def get_serializer_class(self):
'Return appropriate serializer class'
if (self.action == 'retrieve'):
return serializers.RecipeDetailSerializer
elif (self.action == 'upload_image'):
return serializers.RecipeImageSerializer
return self.serializer_class | Return appropriate serializer class | app/recipe/views.py | get_serializer_class | onkarsherkar/recipe-app-api | 0 | python | def get_serializer_class(self):
if (self.action == 'retrieve'):
return serializers.RecipeDetailSerializer
elif (self.action == 'upload_image'):
return serializers.RecipeImageSerializer
return self.serializer_class | def get_serializer_class(self):
if (self.action == 'retrieve'):
return serializers.RecipeDetailSerializer
elif (self.action == 'upload_image'):
return serializers.RecipeImageSerializer
return self.serializer_class<|docstring|>Return appropriate serializer class<|endoftext|> |
d9c56ac6020b53f2f41470e33de11252372f020d8db53e0f37ef44ed4b72f8ae | def perform_create(self, serializer):
'Create a new recipe'
serializer.save(user=self.request.user) | Create a new recipe | app/recipe/views.py | perform_create | onkarsherkar/recipe-app-api | 0 | python | def perform_create(self, serializer):
serializer.save(user=self.request.user) | def perform_create(self, serializer):
serializer.save(user=self.request.user)<|docstring|>Create a new recipe<|endoftext|> |
f09740e6bec14005c600593daad2394d6da0b8718ed67cae7484d7db9e783913 | @action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
'Upload an image to a recipe'
recipe = self.get_object()
serializer = self.get_serializer(recipe, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | Upload an image to a recipe | app/recipe/views.py | upload_image | onkarsherkar/recipe-app-api | 0 | python | @action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
recipe = self.get_object()
serializer = self.get_serializer(recipe, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | @action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
recipe = self.get_object()
serializer = self.get_serializer(recipe, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)<|docstring|>Upload an image to a recipe<|endoftext|> |
a9cafc969fd01d94825b509d84d491751c6129ce178cce639586ed96c1ae4ea0 | def action(self, req, body, tenant_id, id):
"\n Handles requests that modify existing instances in some manner. Actions\n could include 'resize', 'restart'\n :param req: http request object\n :param body: deserialized body of the request as a dict\n :param tenant_id: the tenant id for whom owns the instance\n :param id: instance id\n "
LOG.debug("instance action req : '%s'\n\n", req)
if (not body):
raise exception.BadRequest(_('Invalid request body.'))
context = req.environ[wsgi.CONTEXT_KEY]
_actions = {'restart': self._action_restart, 'resize': self._action_resize, 'promote_to_replica_source': self._action_promote_to_replica_source, 'eject_replica_source': self._action_eject_replica_source, 'reset_status': self._action_reset_status}
selected_action = None
action_name = None
for key in body:
if (key in _actions):
selected_action = _actions[key]
action_name = key
LOG.info('Performing %(action_name)s action against instance %(instance_id)s for tenant %(tenant_id)s, body: %(body)s', {'action_name': action_name, 'instance_id': id, 'tenant_id': tenant_id, 'body': body})
needs_server = True
if (action_name in ['reset_status']):
needs_server = False
instance = models.Instance.load(context, id, needs_server=needs_server)
return selected_action(context, req, instance, body) | Handles requests that modify existing instances in some manner. Actions
could include 'resize', 'restart'
:param req: http request object
:param body: deserialized body of the request as a dict
:param tenant_id: the tenant id that owns the instance
:param id: instance id | trove/instance/service.py | action | viettelidc-oss/trove | 1 | python | def action(self, req, body, tenant_id, id):
"\n Handles requests that modify existing instances in some manner. Actions\n could include 'resize', 'restart'\n :param req: http request object\n :param body: deserialized body of the request as a dict\n :param tenant_id: the tenant id for whom owns the instance\n :param id: instance id\n "
LOG.debug("instance action req : '%s'\n\n", req)
if (not body):
raise exception.BadRequest(_('Invalid request body.'))
context = req.environ[wsgi.CONTEXT_KEY]
_actions = {'restart': self._action_restart, 'resize': self._action_resize, 'promote_to_replica_source': self._action_promote_to_replica_source, 'eject_replica_source': self._action_eject_replica_source, 'reset_status': self._action_reset_status}
selected_action = None
action_name = None
for key in body:
if (key in _actions):
selected_action = _actions[key]
action_name = key
LOG.info('Performing %(action_name)s action against instance %(instance_id)s for tenant %(tenant_id)s, body: %(body)s', {'action_name': action_name, 'instance_id': id, 'tenant_id': tenant_id, 'body': body})
needs_server = True
if (action_name in ['reset_status']):
needs_server = False
instance = models.Instance.load(context, id, needs_server=needs_server)
return selected_action(context, req, instance, body) | def action(self, req, body, tenant_id, id):
"\n Handles requests that modify existing instances in some manner. Actions\n could include 'resize', 'restart'\n :param req: http request object\n :param body: deserialized body of the request as a dict\n :param tenant_id: the tenant id for whom owns the instance\n :param id: instance id\n "
LOG.debug("instance action req : '%s'\n\n", req)
if (not body):
raise exception.BadRequest(_('Invalid request body.'))
context = req.environ[wsgi.CONTEXT_KEY]
_actions = {'restart': self._action_restart, 'resize': self._action_resize, 'promote_to_replica_source': self._action_promote_to_replica_source, 'eject_replica_source': self._action_eject_replica_source, 'reset_status': self._action_reset_status}
selected_action = None
action_name = None
for key in body:
if (key in _actions):
selected_action = _actions[key]
action_name = key
LOG.info('Performing %(action_name)s action against instance %(instance_id)s for tenant %(tenant_id)s, body: %(body)s', {'action_name': action_name, 'instance_id': id, 'tenant_id': tenant_id, 'body': body})
needs_server = True
if (action_name in ['reset_status']):
needs_server = False
instance = models.Instance.load(context, id, needs_server=needs_server)
return selected_action(context, req, instance, body)<|docstring|>Handles requests that modify existing instances in some manner. Actions
could include 'resize', 'restart'
:param req: http request object
:param body: deserialized body of the request as a dict
:param tenant_id: the tenant id that owns the instance
:param id: instance id<|endoftext|> |
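As a rough illustration of the dispatch in the action record above, each request body carries one of the keys in the _actions table; everything beyond the key names (sizes, flavor URLs) is an assumed placeholder, not a documented payload.

SUPPORTED_ACTIONS = ('restart', 'resize', 'promote_to_replica_source',
                     'eject_replica_source', 'reset_status')

bodies = [
    {'restart': {}},
    {'resize': {'volume': {'size': 5}}},                      # volume resize, size is a placeholder
    {'resize': {'flavorRef': 'https://example/flavors/2'}},   # instance resize, URL is a placeholder
    {'reset_status': {}},
]
for body in bodies:
    # mirrors the key scan in action(): the matching key picks the handler
    action_name = next(key for key in body if key in SUPPORTED_ACTIONS)
    print(action_name)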
8e172bd4cb5589a6dcc1c3936294c7168c6ed1874bf913c9d6ed9dd83b9f363a | def _action_resize(self, context, req, instance, body):
'\n Handles 2 cases\n 1. resize volume\n body only contains {volume: {size: x}}\n 2. resize instance\n body only contains {flavorRef: http.../2}\n\n If the body has both we will throw back an error.\n '
options = {'volume': self._action_resize_volume, 'flavorRef': self._action_resize_flavor}
selected_option = None
args = None
for key in options:
if (key in body['resize']):
selected_option = options[key]
args = body['resize'][key]
break
return selected_option(context, req, instance, args) | Handles 2 cases
1. resize volume
body only contains {volume: {size: x}}
2. resize instance
body only contains {flavorRef: http.../2}
If the body has both we will throw back an error. | trove/instance/service.py | _action_resize | viettelidc-oss/trove | 1 | python | def _action_resize(self, context, req, instance, body):
'\n Handles 2 cases\n 1. resize volume\n body only contains {volume: {size: x}}\n 2. resize instance\n body only contains {flavorRef: http.../2}\n\n If the body has both we will throw back an error.\n '
options = {'volume': self._action_resize_volume, 'flavorRef': self._action_resize_flavor}
selected_option = None
args = None
for key in options:
if (key in body['resize']):
selected_option = options[key]
args = body['resize'][key]
break
return selected_option(context, req, instance, args) | def _action_resize(self, context, req, instance, body):
'\n Handles 2 cases\n 1. resize volume\n body only contains {volume: {size: x}}\n 2. resize instance\n body only contains {flavorRef: http.../2}\n\n If the body has both we will throw back an error.\n '
options = {'volume': self._action_resize_volume, 'flavorRef': self._action_resize_flavor}
selected_option = None
args = None
for key in options:
if (key in body['resize']):
selected_option = options[key]
args = body['resize'][key]
break
return selected_option(context, req, instance, args)<|docstring|>Handles 2 cases
1. resize volume
body only contains {volume: {size: x}}
2. resize instance
body only contains {flavorRef: http.../2}
If the body has both we will throw back an error.<|endoftext|> |
e96cb357a0255c8bd7ceca9505777447d748292f877350917962704347ff9f72 | def index(self, req, tenant_id):
'Return all instances.'
LOG.info("Listing database instances for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:index')
instances = self._get_instances(req, instance_view=views.InstanceView)
return wsgi.Result(instances, 200) | Return all instances. | trove/instance/service.py | index | viettelidc-oss/trove | 1 | python | def index(self, req, tenant_id):
LOG.info("Listing database instances for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:index')
instances = self._get_instances(req, instance_view=views.InstanceView)
return wsgi.Result(instances, 200) | def index(self, req, tenant_id):
LOG.info("Listing database instances for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:index')
instances = self._get_instances(req, instance_view=views.InstanceView)
return wsgi.Result(instances, 200)<|docstring|>Return all instances.<|endoftext|> |
ad45e1d5dac2278831f072b25d4e9e50056df8b9ee628c6b8740025e779b34af | def detail(self, req, tenant_id):
'Return all instances with details.'
LOG.info("Listing database instances with details for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:detail')
instances = self._get_instances(req, instance_view=views.InstanceDetailView)
return wsgi.Result(instances, 200) | Return all instances with details. | trove/instance/service.py | detail | viettelidc-oss/trove | 1 | python | def detail(self, req, tenant_id):
LOG.info("Listing database instances with details for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:detail')
instances = self._get_instances(req, instance_view=views.InstanceDetailView)
return wsgi.Result(instances, 200) | def detail(self, req, tenant_id):
LOG.info("Listing database instances with details for tenant '%s'", tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
policy.authorize_on_tenant(context, 'instance:detail')
instances = self._get_instances(req, instance_view=views.InstanceDetailView)
return wsgi.Result(instances, 200)<|docstring|>Return all instances with details.<|endoftext|> |
c4613f9cf069e9677f9bd4893623ddacfd7a97d3ee5e64b6a57fdeadc1db8fa4 | def backups(self, req, tenant_id, id):
'Return all backups for the specified instance.'
LOG.info("Listing backups for instance '%s'", id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'backups', instance)
(backups, marker) = backup_model.list_for_instance(context, id)
view = backup_views.BackupViews(backups)
paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker)
return wsgi.Result(paged.data(), 200) | Return all backups for the specified instance. | trove/instance/service.py | backups | viettelidc-oss/trove | 1 | python | def backups(self, req, tenant_id, id):
LOG.info("Listing backups for instance '%s'", id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'backups', instance)
(backups, marker) = backup_model.list_for_instance(context, id)
view = backup_views.BackupViews(backups)
paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker)
return wsgi.Result(paged.data(), 200) | def backups(self, req, tenant_id, id):
LOG.info("Listing backups for instance '%s'", id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'backups', instance)
(backups, marker) = backup_model.list_for_instance(context, id)
view = backup_views.BackupViews(backups)
paged = pagination.SimplePaginatedDataView(req.url, 'backups', view, marker)
return wsgi.Result(paged.data(), 200)<|docstring|>Return all backups for the specified instance.<|endoftext|> |
271782027c1f025c7be45801593cd5d0199261e999de9f7560f46e3685f2510b | def show(self, req, tenant_id, id):
'Return a single instance.'
LOG.info("Showing database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
server = models.load_instance_with_info(models.DetailInstance, context, id)
self.authorize_instance_action(context, 'show', server)
return wsgi.Result(views.InstanceDetailView(server, req=req).data(), 200) | Return a single instance. | trove/instance/service.py | show | viettelidc-oss/trove | 1 | python | def show(self, req, tenant_id, id):
LOG.info("Showing database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
server = models.load_instance_with_info(models.DetailInstance, context, id)
self.authorize_instance_action(context, 'show', server)
return wsgi.Result(views.InstanceDetailView(server, req=req).data(), 200) | def show(self, req, tenant_id, id):
LOG.info("Showing database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
server = models.load_instance_with_info(models.DetailInstance, context, id)
self.authorize_instance_action(context, 'show', server)
return wsgi.Result(views.InstanceDetailView(server, req=req).data(), 200)<|docstring|>Return a single instance.<|endoftext|> |
c8f937a456c1356e37853c33e928a9b8db287b6e672454df9c52585ca1694748 | def delete(self, req, tenant_id, id):
'Delete a single instance.'
LOG.info("Deleting database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.load_any_instance(context, id)
self.authorize_instance_action(context, 'delete', instance)
context.notification = notification.DBaaSInstanceDelete(context, request=req)
with StartNotification(context, instance_id=instance.id):
marker = 'foo'
while marker:
(instance_modules, marker) = module_models.InstanceModules.load(context, instance_id=id)
for instance_module in instance_modules:
instance_module = module_models.InstanceModule.load(context, instance_module['instance_id'], instance_module['module_id'])
module_models.InstanceModule.delete(context, instance_module)
instance.delete()
return wsgi.Result(None, 202) | Delete a single instance. | trove/instance/service.py | delete | viettelidc-oss/trove | 1 | python | def delete(self, req, tenant_id, id):
LOG.info("Deleting database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.load_any_instance(context, id)
self.authorize_instance_action(context, 'delete', instance)
context.notification = notification.DBaaSInstanceDelete(context, request=req)
with StartNotification(context, instance_id=instance.id):
marker = 'foo'
while marker:
(instance_modules, marker) = module_models.InstanceModules.load(context, instance_id=id)
for instance_module in instance_modules:
instance_module = module_models.InstanceModule.load(context, instance_module['instance_id'], instance_module['module_id'])
module_models.InstanceModule.delete(context, instance_module)
instance.delete()
return wsgi.Result(None, 202) | def delete(self, req, tenant_id, id):
LOG.info("Deleting database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.load_any_instance(context, id)
self.authorize_instance_action(context, 'delete', instance)
context.notification = notification.DBaaSInstanceDelete(context, request=req)
with StartNotification(context, instance_id=instance.id):
marker = 'foo'
while marker:
(instance_modules, marker) = module_models.InstanceModules.load(context, instance_id=id)
for instance_module in instance_modules:
instance_module = module_models.InstanceModule.load(context, instance_module['instance_id'], instance_module['module_id'])
module_models.InstanceModule.delete(context, instance_module)
instance.delete()
return wsgi.Result(None, 202)<|docstring|>Delete a single instance.<|endoftext|> |
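The delete record above drains instance modules with a marker-driven loop before deleting the instance. A small self-contained sketch of that paging pattern, with a hypothetical fetch_page standing in for InstanceModules.load:

def drain(fetch_page):
    marker = 'start'  # any truthy seed, like the marker = 'foo' above
    while marker:
        items, marker = fetch_page(marker)
        for item in items:
            print('deleting', item)

pages = {'start': ([1, 2], 'next'), 'next': ([3], None)}
drain(lambda m: pages[m])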
612d4c61e983e8eed109a72deaca62c48556dff94bc346d6c402684276bc7afb | def update(self, req, id, body, tenant_id):
'Updates the instance to attach/detach configuration.'
LOG.info("Updating database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug('req: %s', req)
LOG.debug('body: %s', body)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'update', instance)
args = {}
args['configuration_id'] = self._configuration_parse(context, body)
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202) | Updates the instance to attach/detach configuration. | trove/instance/service.py | update | viettelidc-oss/trove | 1 | python | def update(self, req, id, body, tenant_id):
LOG.info("Updating database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug('req: %s', req)
LOG.debug('body: %s', body)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'update', instance)
args = {}
args['configuration_id'] = self._configuration_parse(context, body)
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202) | def update(self, req, id, body, tenant_id):
LOG.info("Updating database instance '%(instance_id)s' for tenant '%(tenant_id)s'", {'instance_id': id, 'tenant_id': tenant_id})
LOG.debug('req: %s', req)
LOG.debug('body: %s', body)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'update', instance)
args = {}
args['configuration_id'] = self._configuration_parse(context, body)
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202)<|docstring|>Updates the instance to attach/detach configuration.<|endoftext|> |
1cfdc30123157fc0f10c183ccbca46d01bbd7f4e66e5608a769657310facb669 | def edit(self, req, id, body, tenant_id):
'\n Updates the instance to set or unset one or more attributes.\n '
LOG.info('Editing instance for tenant id %s.', tenant_id)
LOG.debug('req: %s', strutils.mask_password(req))
LOG.debug('body: %s', strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'edit', instance)
args = {}
args['detach_replica'] = (('replica_of' in body['instance']) or ('slave_of' in body['instance']))
if ('name' in body['instance']):
args['name'] = body['instance']['name']
if ('configuration' in body['instance']):
args['configuration_id'] = self._configuration_parse(context, body)
if ('datastore_version' in body['instance']):
args['datastore_version'] = body['instance'].get('datastore_version')
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202) | Updates the instance to set or unset one or more attributes. | trove/instance/service.py | edit | viettelidc-oss/trove | 1 | python | def edit(self, req, id, body, tenant_id):
'\n \n '
LOG.info('Editing instance for tenant id %s.', tenant_id)
LOG.debug('req: %s', strutils.mask_password(req))
LOG.debug('body: %s', strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'edit', instance)
args = {}
args['detach_replica'] = (('replica_of' in body['instance']) or ('slave_of' in body['instance']))
if ('name' in body['instance']):
args['name'] = body['instance']['name']
if ('configuration' in body['instance']):
args['configuration_id'] = self._configuration_parse(context, body)
if ('datastore_version' in body['instance']):
args['datastore_version'] = body['instance'].get('datastore_version')
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202) | def edit(self, req, id, body, tenant_id):
'\n \n '
LOG.info('Editing instance for tenant id %s.', tenant_id)
LOG.debug('req: %s', strutils.mask_password(req))
LOG.debug('body: %s', strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'edit', instance)
args = {}
args['detach_replica'] = (('replica_of' in body['instance']) or ('slave_of' in body['instance']))
if ('name' in body['instance']):
args['name'] = body['instance']['name']
if ('configuration' in body['instance']):
args['configuration_id'] = self._configuration_parse(context, body)
if ('datastore_version' in body['instance']):
args['datastore_version'] = body['instance'].get('datastore_version')
self._modify_instance(context, req, instance, **args)
return wsgi.Result(None, 202)<|docstring|>Updates the instance to set or unset one or more attributes.<|endoftext|> |
629e4f73cc10200441da2fdec5f703a49110f88fd82dd5d0ad70c271b9a24841 | def configuration(self, req, tenant_id, id):
'\n Returns the default configuration template applied to the instance.\n '
LOG.info('Getting default configuration for instance %s', id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'configuration', instance)
LOG.debug('Server: %s', instance)
config = instance.get_default_configuration_template()
LOG.debug('Default config for instance %(instance_id)s is %(config)s', {'instance_id': id, 'config': config})
return wsgi.Result(views.DefaultConfigurationView(config).data(), 200) | Returns the default configuration template applied to the instance. | trove/instance/service.py | configuration | viettelidc-oss/trove | 1 | python | def configuration(self, req, tenant_id, id):
'\n \n '
LOG.info('Getting default configuration for instance %s', id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'configuration', instance)
LOG.debug('Server: %s', instance)
config = instance.get_default_configuration_template()
LOG.debug('Default config for instance %(instance_id)s is %(config)s', {'instance_id': id, 'config': config})
return wsgi.Result(views.DefaultConfigurationView(config).data(), 200) | def configuration(self, req, tenant_id, id):
'\n \n '
LOG.info('Getting default configuration for instance %s', id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
self.authorize_instance_action(context, 'configuration', instance)
LOG.debug('Server: %s', instance)
config = instance.get_default_configuration_template()
LOG.debug('Default config for instance %(instance_id)s is %(config)s', {'instance_id': id, 'config': config})
return wsgi.Result(views.DefaultConfigurationView(config).data(), 200)<|docstring|>Returns the default configuration template applied to the instance.<|endoftext|> |
44aac48af8474d601904ab990bebdd8813f7e6e67d2edcad9611a9f594585dfe | def guest_log_list(self, req, tenant_id, id):
'Return all information about all logs for an instance.'
LOG.debug('Listing logs for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'guest_log_list', instance)
client = clients.create_guest_client(context, id)
guest_log_list = client.guest_log_list()
return wsgi.Result({'logs': guest_log_list}, 200) | Return all information about all logs for an instance. | trove/instance/service.py | guest_log_list | viettelidc-oss/trove | 1 | python | def guest_log_list(self, req, tenant_id, id):
LOG.debug('Listing logs for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'guest_log_list', instance)
client = clients.create_guest_client(context, id)
guest_log_list = client.guest_log_list()
return wsgi.Result({'logs': guest_log_list}, 200) | def guest_log_list(self, req, tenant_id, id):
LOG.debug('Listing logs for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'guest_log_list', instance)
client = clients.create_guest_client(context, id)
guest_log_list = client.guest_log_list()
return wsgi.Result({'logs': guest_log_list}, 200)<|docstring|>Return all information about all logs for an instance.<|endoftext|> |
51ffe4f5d669299761cc1e33bdd4e85e94262ec25bd03bf4d898e13e9fed7fc7 | def guest_log_action(self, req, body, tenant_id, id):
'Processes a guest log.'
LOG.info('Processing log for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
log_name = body['name']
enable = body.get('enable', None)
disable = body.get('disable', None)
publish = body.get('publish', None)
discard = body.get('discard', None)
if (enable and disable):
raise exception.BadRequest(_('Cannot enable and disable log.'))
client = clients.create_guest_client(context, id)
guest_log = client.guest_log_action(log_name, enable, disable, publish, discard)
return wsgi.Result({'log': guest_log}, 200) | Processes a guest log. | trove/instance/service.py | guest_log_action | viettelidc-oss/trove | 1 | python | def guest_log_action(self, req, body, tenant_id, id):
LOG.info('Processing log for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
log_name = body['name']
enable = body.get('enable', None)
disable = body.get('disable', None)
publish = body.get('publish', None)
discard = body.get('discard', None)
if (enable and disable):
raise exception.BadRequest(_('Cannot enable and disable log.'))
client = clients.create_guest_client(context, id)
guest_log = client.guest_log_action(log_name, enable, disable, publish, discard)
return wsgi.Result({'log': guest_log}, 200) | def guest_log_action(self, req, body, tenant_id, id):
LOG.info('Processing log for tenant %s', tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
try:
backup_model.verify_swift_auth_token(context)
except exception.SwiftNotFound:
raise exception.LogsNotAvailable()
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
log_name = body['name']
enable = body.get('enable', None)
disable = body.get('disable', None)
publish = body.get('publish', None)
discard = body.get('discard', None)
if (enable and disable):
raise exception.BadRequest(_('Cannot enable and disable log.'))
client = clients.create_guest_client(context, id)
guest_log = client.guest_log_action(log_name, enable, disable, publish, discard)
return wsgi.Result({'log': guest_log}, 200)<|docstring|>Processes a guest log.<|endoftext|> |
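A minimal sketch of the body handling in guest_log_action above; the log name and flag values are placeholders, and only the enable/disable conflict check is taken from the handler.

def validate_log_body(body):
    if body.get('enable') and body.get('disable'):
        raise ValueError('Cannot enable and disable log.')
    return body['name'], body.get('publish'), body.get('discard')

print(validate_log_body({'name': 'general', 'publish': True}))  # ('general', True, None)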
461038183d839ea51070dcbd0e8824a2e3e64a2953dd0e91ed606fdafaf51e74 | def module_list(self, req, tenant_id, id):
'Return information about modules on an instance.'
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_list', instance)
from_guest = bool(req.GET.get('from_guest', '').lower())
include_contents = bool(req.GET.get('include_contents', '').lower())
if from_guest:
return self._module_list_guest(context, id, include_contents=include_contents)
else:
return self._module_list(context, id, include_contents=include_contents) | Return information about modules on an instance. | trove/instance/service.py | module_list | viettelidc-oss/trove | 1 | python | def module_list(self, req, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_list', instance)
from_guest = bool(req.GET.get('from_guest', '').lower())
include_contents = bool(req.GET.get('include_contents', '').lower())
if from_guest:
return self._module_list_guest(context, id, include_contents=include_contents)
else:
return self._module_list(context, id, include_contents=include_contents) | def module_list(self, req, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_list', instance)
from_guest = bool(req.GET.get('from_guest', '').lower())
include_contents = bool(req.GET.get('include_contents', '').lower())
if from_guest:
return self._module_list_guest(context, id, include_contents=include_contents)
else:
return self._module_list(context, id, include_contents=include_contents)<|docstring|>Return information about modules on an instance.<|endoftext|> |
92d2d7102966924162bc926a217c6544f46f5d4b9a4e42096cdfe762adf5aff0 | def _module_list_guest(self, context, id, include_contents):
'Return information about modules on an instance.'
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200) | Return information about modules on an instance. | trove/instance/service.py | _module_list_guest | viettelidc-oss/trove | 1 | python | def _module_list_guest(self, context, id, include_contents):
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200) | def _module_list_guest(self, context, id, include_contents):
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200)<|docstring|>Return information about modules on an instance.<|endoftext|> |
7e78ffb6307c1f4ba317f2203ecd169f08c32e8c37b3a85554368fc54566c8b6 | def _module_list(self, context, id, include_contents):
'Return information about instance modules.'
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200) | Return information about instance modules. | trove/instance/service.py | _module_list | viettelidc-oss/trove | 1 | python | def _module_list(self, context, id, include_contents):
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200) | def _module_list(self, context, id, include_contents):
client = clients.create_guest_client(context, id)
result_list = client.module_list(include_contents)
return wsgi.Result({'modules': result_list}, 200)<|docstring|>Return information about instance modules.<|endoftext|> |
9ea4c7dcfabcb6e871ab502fcde5f1b1df3ed00b13a49adeeb1409c4c1d567a0 | def module_apply(self, req, body, tenant_id, id):
'Apply modules to an instance.'
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_apply', instance)
module_ids = [mod['id'] for mod in body.get('modules', [])]
modules = module_models.Modules.load_by_ids(context, module_ids)
module_models.Modules.validate(modules, instance.datastore.id, instance.datastore_version.id)
module_list = module_views.convert_modules_to_list(modules)
client = clients.create_guest_client(context, id)
result_list = client.module_apply(module_list)
models.Instance.add_instance_modules(context, id, modules)
return wsgi.Result({'modules': result_list}, 200) | Apply modules to an instance. | trove/instance/service.py | module_apply | viettelidc-oss/trove | 1 | python | def module_apply(self, req, body, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_apply', instance)
module_ids = [mod['id'] for mod in body.get('modules', [])]
modules = module_models.Modules.load_by_ids(context, module_ids)
module_models.Modules.validate(modules, instance.datastore.id, instance.datastore_version.id)
module_list = module_views.convert_modules_to_list(modules)
client = clients.create_guest_client(context, id)
result_list = client.module_apply(module_list)
models.Instance.add_instance_modules(context, id, modules)
return wsgi.Result({'modules': result_list}, 200) | def module_apply(self, req, body, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_apply', instance)
module_ids = [mod['id'] for mod in body.get('modules', [])]
modules = module_models.Modules.load_by_ids(context, module_ids)
module_models.Modules.validate(modules, instance.datastore.id, instance.datastore_version.id)
module_list = module_views.convert_modules_to_list(modules)
client = clients.create_guest_client(context, id)
result_list = client.module_apply(module_list)
models.Instance.add_instance_modules(context, id, modules)
return wsgi.Result({'modules': result_list}, 200)<|docstring|>Apply modules to an instance.<|endoftext|> |
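The module_apply record above extracts module ids from the request body before loading and validating them. A hedged sketch of the expected body shape, with placeholder ids:

module_apply_body = {'modules': [{'id': 'module-uuid-1'}, {'id': 'module-uuid-2'}]}
module_ids = [mod['id'] for mod in module_apply_body.get('modules', [])]
print(module_ids)  # ['module-uuid-1', 'module-uuid-2']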
9817211df3fd3597230f84ac1be57cecfd2aa896f6762e66b54083ced0f8eed6 | def module_remove(self, req, tenant_id, id, module_id):
'Remove module from an instance.'
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_remove', instance)
module = module_models.Module.load(context, module_id)
module_info = module_views.DetailedModuleView(module).data()
client = clients.create_guest_client(context, id)
client.module_remove(module_info)
instance_modules = module_models.InstanceModules.load_all(context, instance_id=id, module_id=module_id)
for instance_module in instance_modules:
module_models.InstanceModule.delete(context, instance_module)
LOG.debug('Deleted IM record %(instance_module_id)s (instance %(id)s, module %(module_id)s).', {'instance_module_id': instance_module.id, 'id': id, 'module_id': module_id})
return wsgi.Result(None, 200) | Remove module from an instance. | trove/instance/service.py | module_remove | viettelidc-oss/trove | 1 | python | def module_remove(self, req, tenant_id, id, module_id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_remove', instance)
module = module_models.Module.load(context, module_id)
module_info = module_views.DetailedModuleView(module).data()
client = clients.create_guest_client(context, id)
client.module_remove(module_info)
instance_modules = module_models.InstanceModules.load_all(context, instance_id=id, module_id=module_id)
for instance_module in instance_modules:
module_models.InstanceModule.delete(context, instance_module)
LOG.debug('Deleted IM record %(instance_module_id)s (instance %(id)s, module %(module_id)s).', {'instance_module_id': instance_module.id, 'id': id, 'module_id': module_id})
return wsgi.Result(None, 200) | def module_remove(self, req, tenant_id, id, module_id):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
if (not instance):
raise exception.NotFound(uuid=id)
self.authorize_instance_action(context, 'module_remove', instance)
module = module_models.Module.load(context, module_id)
module_info = module_views.DetailedModuleView(module).data()
client = clients.create_guest_client(context, id)
client.module_remove(module_info)
instance_modules = module_models.InstanceModules.load_all(context, instance_id=id, module_id=module_id)
for instance_module in instance_modules:
module_models.InstanceModule.delete(context, instance_module)
LOG.debug('Deleted IM record %(instance_module_id)s (instance %(id)s, module %(module_id)s).', {'instance_module_id': instance_module.id, 'id': id, 'module_id': module_id})
return wsgi.Result(None, 200)<|docstring|>Remove module from an instance.<|endoftext|> |
3fba101d8bcbb3aaf6b952e60cfeb3152bcf17dd2881aa3460d27ca42928bded | def get_current_position(self, entity_id):
'\n overwrite it to provide your real position\n\n :param entity_id:\n '
pass | override it to provide your real position
:param entity_id: | zvt/trader/account.py | get_current_position | Zuojinfang/zvt | 2 | python | def get_current_position(self, entity_id):
'\n overwrite it to provide your real position\n\n :param entity_id:\n '
pass | def get_current_position(self, entity_id):
'\n overwrite it to provide your real position\n\n :param entity_id:\n '
pass<|docstring|>override it to provide your real position
:param entity_id:<|endoftext|> |
ad026c75c9277f6b8b681f0de613b1ac43f79417f903643370611323bfd9fcad | def persist_account(self, timestamp):
'\n save the account to db,we do this after closing time every day\n\n :param timestamp:\n :type timestamp:\n '
the_id = '{}_{}'.format(self.trader_name, to_time_str(timestamp, TIME_FORMAT_ISO8601))
for position in self.account.positions:
position.id = '{}_{}_{}'.format(self.trader_name, position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
position.timestamp = to_pd_timestamp(timestamp)
position.account_stats_id = the_id
self.account.id = the_id
self.logger.info('persist_account:{}'.format(account_stats_schema.dump(self.account)))
self.session.add(self.account)
self.session.commit() | save the account to db, we do this after closing time every day
:param timestamp:
:type timestamp: | zvt/trader/account.py | persist_account | Zuojinfang/zvt | 2 | python | def persist_account(self, timestamp):
'\n save the account to db,we do this after closing time every day\n\n :param timestamp:\n :type timestamp:\n '
the_id = '{}_{}'.format(self.trader_name, to_time_str(timestamp, TIME_FORMAT_ISO8601))
for position in self.account.positions:
position.id = '{}_{}_{}'.format(self.trader_name, position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
position.timestamp = to_pd_timestamp(timestamp)
position.account_stats_id = the_id
self.account.id = the_id
self.logger.info('persist_account:{}'.format(account_stats_schema.dump(self.account)))
self.session.add(self.account)
self.session.commit() | def persist_account(self, timestamp):
'\n save the account to db,we do this after closing time every day\n\n :param timestamp:\n :type timestamp:\n '
the_id = '{}_{}'.format(self.trader_name, to_time_str(timestamp, TIME_FORMAT_ISO8601))
for position in self.account.positions:
position.id = '{}_{}_{}'.format(self.trader_name, position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
position.timestamp = to_pd_timestamp(timestamp)
position.account_stats_id = the_id
self.account.id = the_id
self.logger.info('persist_account:{}'.format(account_stats_schema.dump(self.account)))
self.session.add(self.account)
self.session.commit()<|docstring|>save the account to db, we do this after closing time every day
:param timestamp:
:type timestamp:<|endoftext|> |
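The ids written by persist_account above are composite strings. A small sketch with assumed values (the exact TIME_FORMAT_ISO8601 rendering and entity id are placeholders):

trader_name = 'mytrader'
timestamp = '2021-01-04T00:00:00.000'  # assumed ISO8601-style string
account_stats_id = '{}_{}'.format(trader_name, timestamp)
position_id = '{}_{}_{}'.format(trader_name, 'stock_sz_000338', timestamp)
print(account_stats_id)  # mytrader_2021-01-04T00:00:00.000
print(position_id)       # mytrader_stock_sz_000338_2021-01-04T00:00:00.000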
9d9158cc32c94407c24c931448da2f3d615e50b805eed839a5eea3ea2c55cf66 | def get_current_position(self, entity_id) -> Position:
'\n get current position to decide whether order could make\n\n :param entity_id:\n :type entity_id: str\n :return:\n :rtype: None\n '
for position in self.account.positions:
if (position.entity_id == entity_id):
return position
return None | get current position to decide whether an order can be made
:param entity_id:
:type entity_id: str
:return:
:rtype: None | zvt/trader/account.py | get_current_position | Zuojinfang/zvt | 2 | python | def get_current_position(self, entity_id) -> Position:
'\n get current position to decide whether order could make\n\n :param entity_id:\n :type entity_id: str\n :return:\n :rtype: None\n '
for position in self.account.positions:
if (position.entity_id == entity_id):
return position
return None | def get_current_position(self, entity_id) -> Position:
'\n get current position to decide whether order could make\n\n :param entity_id:\n :type entity_id: str\n :return:\n :rtype: None\n '
for position in self.account.positions:
if (position.entity_id == entity_id):
return position
return None<|docstring|>get current position to decide whether an order can be made
:param entity_id:
:type entity_id: str
:return:
:rtype: None<|endoftext|> |
5e897f320ad669cd5c946f1e86b8924dc0d8e3fbcecf27d1b921c15aca9bd641 | def update_position(self, current_position, order_amount, current_price, order_type, timestamp):
'\n\n :param timestamp:\n :type timestamp:\n :param current_position:\n :type current_position: Position\n :param order_amount:\n :type order_amount:\n :param current_price:\n :type current_price:\n :param order_type:\n :type order_type:\n '
if (order_type == ORDER_TYPE_LONG):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
long_amount = (current_position.long_amount + order_amount)
current_position.average_long_price = (((current_position.average_long_price * current_position.long_amount) + (current_price * order_amount)) / long_amount)
current_position.long_amount = long_amount
if (current_position.trading_t == 0):
current_position.available_long += order_amount
elif (order_type == ORDER_TYPE_SHORT):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
short_amount = (current_position.short_amount + order_amount)
current_position.average_short_price = (((current_position.average_short_price * current_position.short_amount) + (current_price * order_amount)) / short_amount)
current_position.short_amount = short_amount
if (current_position.trading_t == 0):
current_position.available_short += order_amount
elif (order_type == ORDER_TYPE_CLOSE_LONG):
self.account.cash += ((order_amount * current_price) * ((1 - self.slippage) - self.sell_cost))
current_position.available_long -= order_amount
current_position.long_amount -= order_amount
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
self.account.cash += (2 * (order_amount * current_position.average_short_price))
self.account.cash -= ((order_amount * current_price) * ((1 + self.slippage) + self.sell_cost))
current_position.available_short -= order_amount
current_position.short_amount -= order_amount
order_id = '{}_{}_{}_{}'.format(self.trader_name, order_type, current_position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
order = Order(id=order_id, timestamp=to_pd_timestamp(timestamp), trader_name=self.trader_name, entity_id=current_position.entity_id, order_price=current_price, order_amount=order_amount, order_type=order_type, level=self.level.value, status='success')
self.session.add(order)
self.session.commit() | :param timestamp:
:type timestamp:
:param current_position:
:type current_position: Position
:param order_amount:
:type order_amount:
:param current_price:
:type current_price:
:param order_type:
:type order_type: | zvt/trader/account.py | update_position | Zuojinfang/zvt | 2 | python | def update_position(self, current_position, order_amount, current_price, order_type, timestamp):
'\n\n :param timestamp:\n :type timestamp:\n :param current_position:\n :type current_position: Position\n :param order_amount:\n :type order_amount:\n :param current_price:\n :type current_price:\n :param order_type:\n :type order_type:\n '
if (order_type == ORDER_TYPE_LONG):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
long_amount = (current_position.long_amount + order_amount)
current_position.average_long_price = (((current_position.average_long_price * current_position.long_amount) + (current_price * order_amount)) / long_amount)
current_position.long_amount = long_amount
if (current_position.trading_t == 0):
current_position.available_long += order_amount
elif (order_type == ORDER_TYPE_SHORT):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
short_amount = (current_position.short_amount + order_amount)
current_position.average_short_price = (((current_position.average_short_price * current_position.short_amount) + (current_price * order_amount)) / short_amount)
current_position.short_amount = short_amount
if (current_position.trading_t == 0):
current_position.available_short += order_amount
elif (order_type == ORDER_TYPE_CLOSE_LONG):
self.account.cash += ((order_amount * current_price) * ((1 - self.slippage) - self.sell_cost))
current_position.available_long -= order_amount
current_position.long_amount -= order_amount
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
self.account.cash += (2 * (order_amount * current_position.average_short_price))
self.account.cash -= ((order_amount * current_price) * ((1 + self.slippage) + self.sell_cost))
current_position.available_short -= order_amount
current_position.short_amount -= order_amount
order_id = '{}_{}_{}_{}'.format(self.trader_name, order_type, current_position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
order = Order(id=order_id, timestamp=to_pd_timestamp(timestamp), trader_name=self.trader_name, entity_id=current_position.entity_id, order_price=current_price, order_amount=order_amount, order_type=order_type, level=self.level.value, status='success')
self.session.add(order)
self.session.commit() | def update_position(self, current_position, order_amount, current_price, order_type, timestamp):
'\n\n :param timestamp:\n :type timestamp:\n :param current_position:\n :type current_position: Position\n :param order_amount:\n :type order_amount:\n :param current_price:\n :type current_price:\n :param order_type:\n :type order_type:\n '
if (order_type == ORDER_TYPE_LONG):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
long_amount = (current_position.long_amount + order_amount)
current_position.average_long_price = (((current_position.average_long_price * current_position.long_amount) + (current_price * order_amount)) / long_amount)
current_position.long_amount = long_amount
if (current_position.trading_t == 0):
current_position.available_long += order_amount
elif (order_type == ORDER_TYPE_SHORT):
need_money = ((order_amount * current_price) * ((1 + self.slippage) + self.buy_cost))
if (self.account.cash < need_money):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.account.cash -= need_money
short_amount = (current_position.short_amount + order_amount)
current_position.average_short_price = (((current_position.average_short_price * current_position.short_amount) + (current_price * order_amount)) / short_amount)
current_position.short_amount = short_amount
if (current_position.trading_t == 0):
current_position.available_short += order_amount
elif (order_type == ORDER_TYPE_CLOSE_LONG):
self.account.cash += ((order_amount * current_price) * ((1 - self.slippage) - self.sell_cost))
current_position.available_long -= order_amount
current_position.long_amount -= order_amount
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
self.account.cash += (2 * (order_amount * current_position.average_short_price))
self.account.cash -= ((order_amount * current_price) * ((1 + self.slippage) + self.sell_cost))
current_position.available_short -= order_amount
current_position.short_amount -= order_amount
order_id = '{}_{}_{}_{}'.format(self.trader_name, order_type, current_position.entity_id, to_time_str(timestamp, TIME_FORMAT_ISO8601))
order = Order(id=order_id, timestamp=to_pd_timestamp(timestamp), trader_name=self.trader_name, entity_id=current_position.entity_id, order_price=current_price, order_amount=order_amount, order_type=order_type, level=self.level.value, status='success')
self.session.add(order)
self.session.commit()<|docstring|>:param timestamp:
:type timestamp:
:param current_position:
:type current_position: Position
:param order_amount:
:type order_amount:
:param current_price:
:type current_price:
:param order_type:
:type order_type:<|endoftext|> |
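A worked numeric example of the ORDER_TYPE_LONG branch in update_position above; the slippage, buy_cost, prices and amounts are assumed values chosen only to show the arithmetic.

slippage, buy_cost = 0.001, 0.001
order_amount, current_price = 200, 10.0

need_money = order_amount * current_price * (1 + slippage + buy_cost)
print(need_money)  # 2004.0 -> price plus slippage plus commission deducted from cash

# averaging in a new buy on top of 100 shares previously bought at 9.0
old_amount, old_avg = 100, 9.0
long_amount = old_amount + order_amount
average_long_price = (old_avg * old_amount + current_price * order_amount) / long_amount
print(long_amount, round(average_long_price, 4))  # 300 9.6667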
c7d3f2b5b6337765858e67339f0e0f1a3054a5a221538618d5e3658f6d08bba6 | def order(self, entity_id, current_price, current_timestamp, order_amount=0, order_pct=1.0, order_price=0, order_type=ORDER_TYPE_LONG, order_money=0):
'\n Place an order\n\n Parameters\n ----------\n entity_id : str\n id of the entity to trade\n\n current_price : float\n current price\n\n current_timestamp: timestamp\n time of the order\n\n order_amount : int\n amount to order\n\n order_pct : float\n percentage of available cash (position) to use, 0.0-1.0\n\n order_price : float\n used for limit orders\n\n order_type : {ORDER_TYPE_LONG,ORDER_TYPE_SHORT,ORDER_TYPE_CLOSE_LONG,ORDER_TYPE_CLOSE_SHORT}\n type of the order\n\n Returns\n\n '
if (order_price == 0):
current_position = self.get_current_position(entity_id=entity_id)
if (not current_position):
trading_t = self.entity_schema.get_trading_t()
current_position = Position(trader_name=self.trader_name, entity_id=entity_id, long_amount=0, available_long=0, average_long_price=0, short_amount=0, available_short=0, average_short_price=0, profit=0, value=0, trading_t=trading_t)
self.account.positions.append(current_position)
if (order_money > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise InvalidOrderParamError('close long/short not support order_money')
elif (order_amount > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise Exception('not enough position')
elif (0 < order_pct <= 1):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long > 0):
if (order_pct == 1.0):
order_amount = current_position.available_long
else:
order_amount = math.floor((current_position.available_long * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short > 0):
if (order_pct == 1.0):
order_amount = current_position.available_short
else:
order_amount = math.floor((current_position.available_short * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise Exception('not enough position') | Place an order
Parameters
----------
entity_id : str
id of the entity to trade
current_price : float
current price
current_timestamp: timestamp
time of the order
order_amount : int
amount to order
order_pct : float
percentage of available cash (position) to use, 0.0-1.0
order_price : float
used for limit orders
order_type : {ORDER_TYPE_LONG,ORDER_TYPE_SHORT,ORDER_TYPE_CLOSE_LONG,ORDER_TYPE_CLOSE_SHORT}
type of the order
Returns | zvt/trader/account.py | order | Zuojinfang/zvt | 2 | python | def order(self, entity_id, current_price, current_timestamp, order_amount=0, order_pct=1.0, order_price=0, order_type=ORDER_TYPE_LONG, order_money=0):
'\n Place an order\n\n Parameters\n ----------\n entity_id : str\n id of the entity to trade\n\n current_price : float\n current price\n\n current_timestamp: timestamp\n time of the order\n\n order_amount : int\n amount to order\n\n order_pct : float\n percentage of available cash (position) to use, 0.0-1.0\n\n order_price : float\n used for limit orders\n\n order_type : {ORDER_TYPE_LONG,ORDER_TYPE_SHORT,ORDER_TYPE_CLOSE_LONG,ORDER_TYPE_CLOSE_SHORT}\n type of the order\n\n Returns\n\n '
if (order_price == 0):
current_position = self.get_current_position(entity_id=entity_id)
if (not current_position):
trading_t = self.entity_schema.get_trading_t()
current_position = Position(trader_name=self.trader_name, entity_id=entity_id, long_amount=0, available_long=0, average_long_price=0, short_amount=0, available_short=0, average_short_price=0, profit=0, value=0, trading_t=trading_t)
self.account.positions.append(current_position)
if (order_money > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise InvalidOrderParamError('close long/short not support order_money')
elif (order_amount > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise Exception('not enough position')
elif (0 < order_pct <= 1):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long > 0):
if (order_pct == 1.0):
order_amount = current_position.available_long
else:
order_amount = math.floor((current_position.available_long * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short > 0):
if (order_pct == 1.0):
order_amount = current_position.available_short
else:
order_amount = math.floor((current_position.available_short * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise Exception('not enough position') | def order(self, entity_id, current_price, current_timestamp, order_amount=0, order_pct=1.0, order_price=0, order_type=ORDER_TYPE_LONG, order_money=0):
'\n 下单\n\n Parameters\n ----------\n entity_id : str\n 交易标的id\n\n current_price : float\n 当前价格\n\n current_timestamp: timestamp\n 下单的时间\n\n order_amount : int\n 数量\n\n order_pct : float\n 使用可用现金(仓位)的百分比,0.0-1.0\n\n order_price : float\n 用于限价交易\n\n order_type : {ORDER_TYPE_LONG,ORDER_TYPE_SHORT,ORDER_TYPE_CLOSE_LONG,ORDER_TYPE_CLOSE_SHORT}\n 交易类型\n\n Returns\n\n '
if (order_price == 0):
current_position = self.get_current_position(entity_id=entity_id)
if (not current_position):
trading_t = self.entity_schema.get_trading_t()
current_position = Position(trader_name=self.trader_name, entity_id=entity_id, long_amount=0, available_long=0, average_long_price=0, short_amount=0, available_short=0, average_short_price=0, profit=0, value=0, trading_t=trading_t)
self.account.positions.append(current_position)
if (order_money > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
if (order_money > self.account.cash):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
order_amount = (order_money // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise InvalidOrderParamError('close long/short not support order_money')
elif (order_amount > 0):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short >= order_amount):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
raise Exception('not enough position')
elif (0 < order_pct <= 1):
if (order_type == ORDER_TYPE_LONG):
if (current_position.short_amount > 0):
raise InvalidOrderError('close the short position before open long')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_SHORT):
if (current_position.long_amount > 0):
raise InvalidOrderError('close the long position before open short')
cost = (current_price * ((1 + self.slippage) + self.buy_cost))
want_pay = (self.account.cash * order_pct)
order_amount = (want_pay // cost)
if (order_amount < 100):
if self.rich_mode:
self.input_money()
else:
raise NotEnoughMoneyError()
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
elif (order_type == ORDER_TYPE_CLOSE_LONG):
if (current_position.available_long > 0):
if (order_pct == 1.0):
order_amount = current_position.available_long
else:
order_amount = math.floor((current_position.available_long * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise NotEnoughPositionError()
elif (order_type == ORDER_TYPE_CLOSE_SHORT):
if (current_position.available_short > 0):
if (order_pct == 1.0):
order_amount = current_position.available_short
else:
order_amount = math.floor((current_position.available_short * order_pct))
if (order_amount != 0):
self.update_position(current_position, order_amount, current_price, order_type, current_timestamp)
else:
self.logger.warning(f'{entity_id} available_long:{current_position.available_long} order_pct:{order_pct} order_amount:{order_amount}')
else:
raise Exception('not enough position')<|docstring|>Place an order
Parameters
----------
entity_id : str
id of the entity to trade
current_price : float
current price
current_timestamp : timestamp
time the order is placed
order_amount : int
order quantity
order_pct : float
fraction of available cash (or position) to use, 0.0-1.0
order_price : float
used for limit orders
order_type : {ORDER_TYPE_LONG,ORDER_TYPE_SHORT,ORDER_TYPE_CLOSE_LONG,ORDER_TYPE_CLOSE_SHORT}
order type
Returns<|endoftext|> |
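A minimal usage sketch for the order method recorded above. The account object, entity id, prices, and timestamp are made-up placeholders, and the ORDER_TYPE_* constants stand for the ones referenced in the record; this is an illustration, not part of the zvt source.
import datetime

def place_demo_orders(account, order_type_long, order_type_close_long):
    # `account` stands for any object exposing the order() method shown above.
    now = datetime.datetime.now()
    # Open a long position with half of the available cash.
    account.order(entity_id="stock_sz_000001", current_price=10.0,
                  current_timestamp=now, order_pct=0.5,
                  order_type=order_type_long)
    # Close the whole long position at a later price.
    account.order(entity_id="stock_sz_000001", current_price=10.5,
                  current_timestamp=now, order_pct=1.0,
                  order_type=order_type_close_long)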
766f81a6e78e20ff5b96c803cf7ca38432d0616d6a4d9761085876a42c0254bc | def is_in_onnx_export() -> bool:
'Returns whether it is in the middle of ONNX export.'
return GLOBALS.in_onnx_export | Returns whether it is in the middle of ONNX export. | torch/onnx/utils.py | is_in_onnx_export | TristanLaan/pytorch | 0 | python | def is_in_onnx_export() -> bool:
return GLOBALS.in_onnx_export | def is_in_onnx_export() -> bool:
return GLOBALS.in_onnx_export<|docstring|>Returns whether it is in the middle of ONNX export.<|endoftext|> |
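A small sketch of how this flag is typically consumed inside a model's forward pass, through the public torch.onnx.is_in_onnx_export wrapper; the module itself is a made-up example.
import torch

class AddClamp(torch.nn.Module):
    def forward(self, x):
        # Take an export-friendly path only while tracing to ONNX.
        if torch.onnx.is_in_onnx_export():
            return torch.clamp(x + 1, min=0.0)
        return torch.nn.functional.relu(x + 1)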
e9666d55d9a0583d46754f73c453d4df937eecc04e15cf6e67c921daa7e003a5 | def warn_on_static_input_change(input_states):
"Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.\n\n We accept dictionaries and strings as ONNX inputs, but they should be only for\n configuration use. we detect here if these inputs are modified, and if so we warn\n the user that the changes won't take effect in the traced ONNX graph.\n "
for (input, traced_input) in zip(input_states[0], input_states[1]):
if isinstance(input, dict):
if (list(input.keys()) != list(traced_input.keys())):
warning = 'We detected that you are modifying a dictionary that is an input to your model. Note that dictionaries are allowed as inputs in ONNX but they should be handled with care. Usages of dictionaries is not recommended, and should not be used except for configuration use. Also note that the order and values of the keys must remain the same. '
warnings.warn(warning)
elif isinstance(input, str):
if (input != traced_input):
warning = 'The model seems to have string inputs/outputs. Note that strings will not appear as inputs/outputs of the ONNX graph. '
warnings.warn(warning) | Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.
We accept dictionaries and strings as ONNX inputs, but they should be only for
configuration use. We detect here if these inputs are modified, and if so we warn
the user that the changes won't take effect in the traced ONNX graph. | torch/onnx/utils.py | warn_on_static_input_change | TristanLaan/pytorch | 0 | python | def warn_on_static_input_change(input_states):
"Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.\n\n We accept dictionaries and strings as ONNX inputs, but they should be only for\n configuration use. we detect here if these inputs are modified, and if so we warn\n the user that the changes won't take effect in the traced ONNX graph.\n "
for (input, traced_input) in zip(input_states[0], input_states[1]):
if isinstance(input, dict):
if (list(input.keys()) != list(traced_input.keys())):
warning = 'We detected that you are modifying a dictionary that is an input to your model. Note that dictionaries are allowed as inputs in ONNX but they should be handled with care. Usages of dictionaries is not recommended, and should not be used except for configuration use. Also note that the order and values of the keys must remain the same. '
warnings.warn(warning)
elif isinstance(input, str):
if (input != traced_input):
warning = 'The model seems to have string inputs/outputs. Note that strings will not appear as inputs/outputs of the ONNX graph. '
warnings.warn(warning) | def warn_on_static_input_change(input_states):
"Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.\n\n We accept dictionaries and strings as ONNX inputs, but they should be only for\n configuration use. we detect here if these inputs are modified, and if so we warn\n the user that the changes won't take effect in the traced ONNX graph.\n "
for (input, traced_input) in zip(input_states[0], input_states[1]):
if isinstance(input, dict):
if (list(input.keys()) != list(traced_input.keys())):
warning = 'We detected that you are modifying a dictionary that is an input to your model. Note that dictionaries are allowed as inputs in ONNX but they should be handled with care. Usages of dictionaries is not recommended, and should not be used except for configuration use. Also note that the order and values of the keys must remain the same. '
warnings.warn(warning)
elif isinstance(input, str):
if (input != traced_input):
warning = 'The model seems to have string inputs/outputs. Note that strings will not appear as inputs/outputs of the ONNX graph. '
warnings.warn(warning)<|docstring|>Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.
We accept dictionaries and strings as ONNX inputs, but they should be only for
configuration use. We detect here if these inputs are modified, and if so we warn
the user that the changes won't take effect in the traced ONNX graph.<|endoftext|> |
511e4e88763853e66e6fa6199d06ba16cd04f1cc598bebf2845d55e119493c1c | def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
'Resolves the arguments that are ignored when export_type != operator_export_type.ONNX.'
if ((operator_export_type is not operator_export_type.ONNX) and _C_onnx._CAFFE2_ATEN_FALLBACK):
if (arg_value is True):
warnings.warn(f"'{arg_name}' can be set to True only when 'operator_export_type' is `ONNX`. Since 'operator_export_type' is not set to 'ONNX', '{arg_name}' argument will be ignored.")
arg_value = False
return arg_value | Resolves the arguments that are ignored when export_type != operator_export_type.ONNX. | torch/onnx/utils.py | _resolve_args_by_export_type | TristanLaan/pytorch | 0 | python | def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
if ((operator_export_type is not operator_export_type.ONNX) and _C_onnx._CAFFE2_ATEN_FALLBACK):
if (arg_value is True):
warnings.warn(f"'{arg_name}' can be set to True only when 'operator_export_type' is `ONNX`. Since 'operator_export_type' is not set to 'ONNX', '{arg_name}' argument will be ignored.")
arg_value = False
return arg_value | def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
if ((operator_export_type is not operator_export_type.ONNX) and _C_onnx._CAFFE2_ATEN_FALLBACK):
if (arg_value is True):
warnings.warn(f"'{arg_name}' can be set to True only when 'operator_export_type' is `ONNX`. Since 'operator_export_type' is not set to 'ONNX', '{arg_name}' argument will be ignored.")
arg_value = False
return arg_value<|docstring|>Resolves the arguments that are ignored when export_type != operator_export_type.ONNX.<|endoftext|> |
22e990228d6da3650a0570e5f988e21565f1cef89a6ae2ce65c85d77b2ce6c8c | def _decide_keep_init_as_input(keep_initializers_as_inputs: Optional[bool], operator_export_type: _C_onnx.OperatorExportTypes, opset_version: int):
'Decides whether the initializers in the graph should be listed as ONNX graph inputs.\n\n This method encapsulates the logic to decide whether the initializers in the graph\n should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).\n If keep_initializers_as_inputs is not specified (None), then we decide whether to keep\n initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type\n is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other\n export types keep initializers as input (val_keep_init_as_ip=True).\n If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,\n in which case it must be ignored because for opset version <= 8, all initializers MUST be\n part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.\n\n Special handling is needed for opset version 8 or lower, because irrespective\n of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3\n semantics, i.e. all initializers must be listed as ONNX graph input.\n '
if (opset_version < 9):
if (keep_initializers_as_inputs is False):
warnings.warn("Setting 'keep_initializers_as_inputs=False' for opset version8 or lower would lead to an invalid ONNX graph. Therefore, 'keep_initializers_as_inputs=False' is ignored during export.Exported model will have initializers as graph inputs (compliant to ONNX IR v3).")
return True
val_keep_init_as_ip = (True if (keep_initializers_as_inputs is None) else keep_initializers_as_inputs)
if ((keep_initializers_as_inputs is None) and (operator_export_type is _C_onnx.OperatorExportTypes.ONNX)):
val_keep_init_as_ip = False
return val_keep_init_as_ip | Decides whether the initializers in the graph should be listed as ONNX graph inputs.
This method encapsulates the logic to decide whether the initializers in the graph
should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
export types keep initializers as input (val_keep_init_as_ip=True).
If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,
in which case it must be ignored because for opset version <= 8, all initializers MUST be
part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.
Special handling is needed for opset version 8 or lower, because irrespective
of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
semantics, i.e. all initializers must be listed as ONNX graph input. | torch/onnx/utils.py | _decide_keep_init_as_input | TristanLaan/pytorch | 0 | python | def _decide_keep_init_as_input(keep_initializers_as_inputs: Optional[bool], operator_export_type: _C_onnx.OperatorExportTypes, opset_version: int):
'Decides whether the initializers in the graph should be listed as ONNX graph inputs.\n\n This method encapsulates the logic to decide whether the initializers in the graph\n should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).\n If keep_initializers_as_inputs is not specified (None), then we decide whether to keep\n initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type\n is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other\n export types keep initializers as input (val_keep_init_as_ip=True).\n If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,\n in which case it must be ignored because for opset version <= 8, all initializers MUST be\n part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.\n\n Special handling is needed for opset version 8 or lower, because irrespective\n of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3\n semantics, i.e. all initializers must be listed as ONNX graph input.\n '
if (opset_version < 9):
if (keep_initializers_as_inputs is False):
warnings.warn("Setting 'keep_initializers_as_inputs=False' for opset version8 or lower would lead to an invalid ONNX graph. Therefore, 'keep_initializers_as_inputs=False' is ignored during export.Exported model will have initializers as graph inputs (compliant to ONNX IR v3).")
return True
val_keep_init_as_ip = (True if (keep_initializers_as_inputs is None) else keep_initializers_as_inputs)
if ((keep_initializers_as_inputs is None) and (operator_export_type is _C_onnx.OperatorExportTypes.ONNX)):
val_keep_init_as_ip = False
return val_keep_init_as_ip | def _decide_keep_init_as_input(keep_initializers_as_inputs: Optional[bool], operator_export_type: _C_onnx.OperatorExportTypes, opset_version: int):
'Decides whether the initializers in the graph should be listed as ONNX graph inputs.\n\n This method encapsulates the logic to decide whether the initializers in the graph\n should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).\n If keep_initializers_as_inputs is not specified (None), then we decide whether to keep\n initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type\n is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other\n export types keep initializers as input (val_keep_init_as_ip=True).\n If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,\n in which case it must be ignored because for opset version <= 8, all initializers MUST be\n part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.\n\n Special handling is needed for opset version 8 or lower, because irrespective\n of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3\n semantics, i.e. all initializers must be listed as ONNX graph input.\n '
if (opset_version < 9):
if (keep_initializers_as_inputs is False):
warnings.warn("Setting 'keep_initializers_as_inputs=False' for opset version8 or lower would lead to an invalid ONNX graph. Therefore, 'keep_initializers_as_inputs=False' is ignored during export.Exported model will have initializers as graph inputs (compliant to ONNX IR v3).")
return True
val_keep_init_as_ip = (True if (keep_initializers_as_inputs is None) else keep_initializers_as_inputs)
if ((keep_initializers_as_inputs is None) and (operator_export_type is _C_onnx.OperatorExportTypes.ONNX)):
val_keep_init_as_ip = False
return val_keep_init_as_ip<|docstring|>Decides whether the initializers in the graph should be listed as ONNX graph inputs.
This method encapsulates the logic to decide whether the initializers in the graph
should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
export types keep initializers as input (val_keep_init_as_ip=True).
If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,
in which case it must be ignored because for opset version <= 8, all initializers MUST be
part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.
Special handling is needed for opset version 8 or lower, because irrespective
of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
semantics, i.e. all initializers must be listed as ONNX graph input.<|endoftext|> |
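The decision described in the docstring above reduces to a small rule; the following is a standalone re-statement for reference (not the exporter's own code), with export_type_is_onnx standing in for the OperatorExportTypes.ONNX check.
def keep_init_as_input(keep_initializers_as_inputs, export_type_is_onnx, opset_version):
    # Opset 8 and lower follow ONNX IR v3: initializers must stay graph inputs.
    if opset_version < 9:
        return True
    # An explicit user choice is respected as-is.
    if keep_initializers_as_inputs is not None:
        return keep_initializers_as_inputs
    # Unspecified: plain ONNX export drops initializers from the graph inputs,
    # every other export type keeps them.
    return not export_type_is_onnx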
5d8739c87a007eb9b086d7ed3e23790536d23a3fa7561791fde18e5edbb519f1 | def _check_flatten_did_not_remove(original, jit_flattened):
'torch.jit._flatten removes None. Check if it did so in this case.'
def flatten(x):
if isinstance(x, (list, tuple)):
for inner in x:
(yield from flatten(inner))
elif isinstance(x, dict):
for inner in x.values():
(yield from flatten(inner))
else:
(yield x)
flattened_with_none = list(flatten(original))
num_none = (len(flattened_with_none) - len(jit_flattened))
assert (num_none >= 0)
if num_none:
raise ValueError(f"args contained {num_none} None's after flattening. When exporting a ScriptModule or ScriptFunction, no args may be None because that breaks type propagation.") | torch.jit._flatten removes None. Check if it did so in this case. | torch/onnx/utils.py | _check_flatten_did_not_remove | TristanLaan/pytorch | 0 | python | def _check_flatten_did_not_remove(original, jit_flattened):
def flatten(x):
if isinstance(x, (list, tuple)):
for inner in x:
(yield from flatten(inner))
elif isinstance(x, dict):
for inner in x.values():
(yield from flatten(inner))
else:
(yield x)
flattened_with_none = list(flatten(original))
num_none = (len(flattened_with_none) - len(jit_flattened))
assert (num_none >= 0)
if num_none:
raise ValueError(f"args contained {num_none} None's after flattening. When exporting a ScriptModule or ScriptFunction, no args may be None because that breaks type propagation.") | def _check_flatten_did_not_remove(original, jit_flattened):
def flatten(x):
if isinstance(x, (list, tuple)):
for inner in x:
(yield from flatten(inner))
elif isinstance(x, dict):
for inner in x.values():
(yield from flatten(inner))
else:
(yield x)
flattened_with_none = list(flatten(original))
num_none = (len(flattened_with_none) - len(jit_flattened))
assert (num_none >= 0)
if num_none:
raise ValueError(f"args contained {num_none} None's after flattening. When exporting a ScriptModule or ScriptFunction, no args may be None because that breaks type propagation.")<|docstring|>torch.jit._flatten removes None. Check if it did so in this case.<|endoftext|> |
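The inner flatten helper above is an ordinary recursive generator; a self-contained sketch of the same idea, runnable on plain Python containers.
def flatten(x):
    # Yield the leaves of nested lists/tuples/dicts, keeping None values.
    if isinstance(x, (list, tuple)):
        for inner in x:
            yield from flatten(inner)
    elif isinstance(x, dict):
        for inner in x.values():
            yield from flatten(inner)
    else:
        yield x

print(list(flatten(([1, None], {"a": 2}))))  # -> [1, None, 2]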
8274118750c10da0bfdfc7f745c52899d0f4f4fb3758c1cff060dc5ebb8a2e40 | def _pre_trace_quant_model(model, args):
'Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return\n original model.\n\n This is due to https://github.com/pytorch/pytorch/issues/75761.\n '
if (any((hasattr(m, '_packed_params') for m in getattr(model, 'modules', (lambda : []))())) or any((getattr(arg, 'is_quantized', False) for arg in args))):
return torch.jit.trace(model, args)
return model | Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return
original model.
This is due to https://github.com/pytorch/pytorch/issues/75761. | torch/onnx/utils.py | _pre_trace_quant_model | TristanLaan/pytorch | 0 | python | def _pre_trace_quant_model(model, args):
'Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return\n original model.\n\n This is due to https://github.com/pytorch/pytorch/issues/75761.\n '
if (any((hasattr(m, '_packed_params') for m in getattr(model, 'modules', (lambda : []))())) or any((getattr(arg, 'is_quantized', False) for arg in args))):
return torch.jit.trace(model, args)
return model | def _pre_trace_quant_model(model, args):
'Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return\n original model.\n\n This is due to https://github.com/pytorch/pytorch/issues/75761.\n '
if (any((hasattr(m, '_packed_params') for m in getattr(model, 'modules', (lambda : []))())) or any((getattr(arg, 'is_quantized', False) for arg in args))):
return torch.jit.trace(model, args)
return model<|docstring|>Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return
original model.
This is due to https://github.com/pytorch/pytorch/issues/75761.<|endoftext|> |
d8715c2b7eccc056606c48fc19befc5cfa2fd01822d34d2eda38b2366369bde9 | def _assign_onnx_node_name(graph, node_names):
'Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.\n\n Returns:\n graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name\n in exported ONNX ModelProto assigned as attribute ``onnx_name``.\n '
def n_fn(n, b_fn, node_names):
for b in n.blocks():
b_fn(b, node_names)
if (n in node_names):
n.s_('onnx_name', node_names[n])
def b_fn(b, node_names):
for n in b.nodes():
n_fn(n, b_fn, node_names)
b_fn(graph, node_names)
return graph | Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.
Returns:
graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name
in exported ONNX ModelProto assigned as attribute ``onnx_name``. | torch/onnx/utils.py | _assign_onnx_node_name | TristanLaan/pytorch | 0 | python | def _assign_onnx_node_name(graph, node_names):
'Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.\n\n Returns:\n graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name\n in exported ONNX ModelProto assigned as attribute ``onnx_name``.\n '
def n_fn(n, b_fn, node_names):
for b in n.blocks():
b_fn(b, node_names)
if (n in node_names):
n.s_('onnx_name', node_names[n])
def b_fn(b, node_names):
for n in b.nodes():
n_fn(n, b_fn, node_names)
b_fn(graph, node_names)
return graph | def _assign_onnx_node_name(graph, node_names):
'Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.\n\n Returns:\n graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name\n in exported ONNX ModelProto assigned as attribute ``onnx_name``.\n '
def n_fn(n, b_fn, node_names):
for b in n.blocks():
b_fn(b, node_names)
if (n in node_names):
n.s_('onnx_name', node_names[n])
def b_fn(b, node_names):
for n in b.nodes():
n_fn(n, b_fn, node_names)
b_fn(graph, node_names)
return graph<|docstring|>Takes in ONNX graph, and mapping from _C.Node to node name in exported ONNX ModelProto.
Returns:
graph (_C.Graph): A TorchScript IR Graph with ONNX nodes, where each _C.Node gets its name
in exported ONNX ModelProto assigned as attribute ``onnx_name``.<|endoftext|> |
06aa333a8a0add9055dab928911d9a14a2a84a07a3053801dd78d7fba353183e | def _model_to_graph(model, args, verbose=False, input_names=None, output_names=None, operator_export_type=_C_onnx.OperatorExportTypes.ONNX, do_constant_folding=True, _disable_torch_constant_prop=False, fixed_batch_size=False, training=None, dynamic_axes=None) -> Tuple[(_C.Graph, Dict[(str, torch.Tensor)], Optional[Union[(torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor])]])]:
'Converts model into an ONNX graph.\n\n Returns:\n graph: A TorchScript IR Graph with ONNX nodes.\n params_dict: Dict from input param name to param value.\n torch_out: The output tensors resulting from the trace of ``model``.\n If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,\n this will be None, since we are not doing any tracing.\n '
if isinstance(args, (torch.Tensor, int, float, bool)):
args = (args,)
model = _pre_trace_quant_model(model, args)
(graph, params, torch_out, module) = _create_jit_graph(model, args)
params_dict = _get_named_param_dict(graph, params)
try:
graph = _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop=_disable_torch_constant_prop, fixed_batch_size=fixed_batch_size, params_dict=params_dict, dynamic_axes=dynamic_axes, input_names=input_names, module=module)
except Exception as e:
torch.onnx.log('Torch IR graph at exception: ', graph)
raise
is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
if is_script:
example_outputs = _get_example_outputs(model, args)
example_outputs_final = ()
for example_output in example_outputs:
example_outputs_final += unpack_quantized_tensor(example_output)
(out_vars, desc) = torch.jit._flatten(example_outputs_final)
_C._jit_pass_onnx_assign_output_shape(graph, out_vars, desc, GLOBALS.onnx_shape_inference, is_script)
else:
if (not isinstance(torch_out, (list, tuple))):
output_wrapped = [torch_out]
else:
output_wrapped = torch_out
(output_tensors, out_desc) = _C._jit_flatten(tuple(output_wrapped))
if (not any((getattr(out, 'is_quantized', False) for out in output_tensors))):
_C._jit_pass_onnx_assign_output_shape(graph, output_tensors, out_desc, GLOBALS.onnx_shape_inference, is_script)
_set_input_and_output_names(graph, input_names, output_names)
params_dict = _get_named_param_dict(graph, params)
if ((training is None) or (training == _C_onnx.TrainingMode.EVAL)):
params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)
if (do_constant_folding and (GLOBALS.export_onnx_opset_version in _constants.onnx_constant_folding_opsets)):
params_dict = _C._jit_pass_onnx_constant_fold(graph, params_dict, GLOBALS.export_onnx_opset_version)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, GLOBALS.export_onnx_opset_version)
params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
if (GLOBALS.export_onnx_opset_version < 9):
_C._jit_pass_onnx_cast_all_constant_to_floating(graph)
params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
_C._jit_decay_packed_param_input_types(graph)
_apply_friendly_debug_names(graph, params_dict)
return (graph, params_dict, torch_out) | Converts model into an ONNX graph.
Returns:
graph: A TorchScript IR Graph with ONNX nodes.
params_dict: Dict from input param name to param value.
torch_out: The output tensors resulting from the trace of ``model``.
If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,
this will be None, since we are not doing any tracing. | torch/onnx/utils.py | _model_to_graph | TristanLaan/pytorch | 0 | python | def _model_to_graph(model, args, verbose=False, input_names=None, output_names=None, operator_export_type=_C_onnx.OperatorExportTypes.ONNX, do_constant_folding=True, _disable_torch_constant_prop=False, fixed_batch_size=False, training=None, dynamic_axes=None) -> Tuple[(_C.Graph, Dict[(str, torch.Tensor)], Optional[Union[(torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor])]])]:
'Converts model into an ONNX graph.\n\n Returns:\n graph: A TorchScript IR Graph with ONNX nodes.\n params_dict: Dict from input param name to param value.\n torch_out: The output tensors resulting from the trace of ``model``.\n If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,\n this will be None, since we are not doing any tracing.\n '
if isinstance(args, (torch.Tensor, int, float, bool)):
args = (args,)
model = _pre_trace_quant_model(model, args)
(graph, params, torch_out, module) = _create_jit_graph(model, args)
params_dict = _get_named_param_dict(graph, params)
try:
graph = _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop=_disable_torch_constant_prop, fixed_batch_size=fixed_batch_size, params_dict=params_dict, dynamic_axes=dynamic_axes, input_names=input_names, module=module)
except Exception as e:
torch.onnx.log('Torch IR graph at exception: ', graph)
raise
is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
if is_script:
example_outputs = _get_example_outputs(model, args)
example_outputs_final = ()
for example_output in example_outputs:
example_outputs_final += unpack_quantized_tensor(example_output)
(out_vars, desc) = torch.jit._flatten(example_outputs_final)
_C._jit_pass_onnx_assign_output_shape(graph, out_vars, desc, GLOBALS.onnx_shape_inference, is_script)
else:
if (not isinstance(torch_out, (list, tuple))):
output_wrapped = [torch_out]
else:
output_wrapped = torch_out
(output_tensors, out_desc) = _C._jit_flatten(tuple(output_wrapped))
if (not any((getattr(out, 'is_quantized', False) for out in output_tensors))):
_C._jit_pass_onnx_assign_output_shape(graph, output_tensors, out_desc, GLOBALS.onnx_shape_inference, is_script)
_set_input_and_output_names(graph, input_names, output_names)
params_dict = _get_named_param_dict(graph, params)
if ((training is None) or (training == _C_onnx.TrainingMode.EVAL)):
params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)
if (do_constant_folding and (GLOBALS.export_onnx_opset_version in _constants.onnx_constant_folding_opsets)):
params_dict = _C._jit_pass_onnx_constant_fold(graph, params_dict, GLOBALS.export_onnx_opset_version)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, GLOBALS.export_onnx_opset_version)
params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
if (GLOBALS.export_onnx_opset_version < 9):
_C._jit_pass_onnx_cast_all_constant_to_floating(graph)
params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
_C._jit_decay_packed_param_input_types(graph)
_apply_friendly_debug_names(graph, params_dict)
return (graph, params_dict, torch_out) | def _model_to_graph(model, args, verbose=False, input_names=None, output_names=None, operator_export_type=_C_onnx.OperatorExportTypes.ONNX, do_constant_folding=True, _disable_torch_constant_prop=False, fixed_batch_size=False, training=None, dynamic_axes=None) -> Tuple[(_C.Graph, Dict[(str, torch.Tensor)], Optional[Union[(torch.Tensor, Tuple[torch.Tensor], List[torch.Tensor])]])]:
'Converts model into an ONNX graph.\n\n Returns:\n graph: A TorchScript IR Graph with ONNX nodes.\n params_dict: Dict from input param name to param value.\n torch_out: The output tensors resulting from the trace of ``model``.\n If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,\n this will be None, since we are not doing any tracing.\n '
if isinstance(args, (torch.Tensor, int, float, bool)):
args = (args,)
model = _pre_trace_quant_model(model, args)
(graph, params, torch_out, module) = _create_jit_graph(model, args)
params_dict = _get_named_param_dict(graph, params)
try:
graph = _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop=_disable_torch_constant_prop, fixed_batch_size=fixed_batch_size, params_dict=params_dict, dynamic_axes=dynamic_axes, input_names=input_names, module=module)
except Exception as e:
torch.onnx.log('Torch IR graph at exception: ', graph)
raise
is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
if is_script:
example_outputs = _get_example_outputs(model, args)
example_outputs_final = ()
for example_output in example_outputs:
example_outputs_final += unpack_quantized_tensor(example_output)
(out_vars, desc) = torch.jit._flatten(example_outputs_final)
_C._jit_pass_onnx_assign_output_shape(graph, out_vars, desc, GLOBALS.onnx_shape_inference, is_script)
else:
if (not isinstance(torch_out, (list, tuple))):
output_wrapped = [torch_out]
else:
output_wrapped = torch_out
(output_tensors, out_desc) = _C._jit_flatten(tuple(output_wrapped))
if (not any((getattr(out, 'is_quantized', False) for out in output_tensors))):
_C._jit_pass_onnx_assign_output_shape(graph, output_tensors, out_desc, GLOBALS.onnx_shape_inference, is_script)
_set_input_and_output_names(graph, input_names, output_names)
params_dict = _get_named_param_dict(graph, params)
if ((training is None) or (training == _C_onnx.TrainingMode.EVAL)):
params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)
if (do_constant_folding and (GLOBALS.export_onnx_opset_version in _constants.onnx_constant_folding_opsets)):
params_dict = _C._jit_pass_onnx_constant_fold(graph, params_dict, GLOBALS.export_onnx_opset_version)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(graph, params_dict, GLOBALS.export_onnx_opset_version)
params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
if (GLOBALS.export_onnx_opset_version < 9):
_C._jit_pass_onnx_cast_all_constant_to_floating(graph)
params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
_C._jit_decay_packed_param_input_types(graph)
_apply_friendly_debug_names(graph, params_dict)
return (graph, params_dict, torch_out)<|docstring|>Converts model into an ONNX graph.
Returns:
graph: A TorchScript IR Graph with ONNX nodes.
params_dict: Dict from input param name to param value.
torch_out: The output tensors resulting from the trace of ``model``.
If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,
this will be None, since we are not doing any tracing.<|endoftext|> |
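_model_to_graph is an internal step; the usual way to drive it is the public torch.onnx.export entry point. A minimal sketch with a made-up model and output file name.
import torch

model = torch.nn.Linear(4, 2)
dummy_input = torch.randn(1, 4)
torch.onnx.export(
    model, dummy_input, "linear.onnx",
    input_names=["x"], output_names=["y"],
    do_constant_folding=True, opset_version=13,
)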
3b3d6c4e5ef67eca5b1f0a366a51733b6cfeee23af2181d790779b22225bde28 | def unconvertible_ops(model, args, training=_C_onnx.TrainingMode.EVAL, opset_version=None):
'\n Converts the model with operator_export_type set to\n torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of\n all the ops that are not supported/implemented by the exporter.\n\n Args:\n model: Same as corresponding arg to torch.onnx.export.\n args: Same as corresponding arg to torch.onnx.export.\n training: Same as corresponding arg to torch.onnx.export.\n opset_version: Same as corresponding arg to torch.onnx.export.\n\n Returns:\n Tuple[torch._C.Graph, List[str]], where the list includes the names\n of the unconvertible ops.\n '
opset_version = (opset_version or _constants.onnx_default_opset)
symbolic_helper._set_opset_version(opset_version)
with exporter_context(model, training, False):
args = _decide_input_format(model, args)
(graph, params_dict, torch_out) = _model_to_graph(model, args, operator_export_type=_C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
unsupported_ops = list()
supported_namespaces = ('onnx', 'prim', 'quantized')
for node in graph.nodes():
if (node.kind().split(':')[0] not in supported_namespaces):
unsupported_ops.append(node.kind())
return (graph, unsupported_ops) | Converts the model with operator_export_type set to
torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of
all the ops that are not supported/implemented by the exporter.
Args:
model: Same as corresponding arg to torch.onnx.export.
args: Same as corresponding arg to torch.onnx.export.
training: Same as corresponding arg to torch.onnx.export.
opset_version: Same as corresponding arg to torch.onnx.export.
Returns:
Tuple[torch._C.Graph, List[str]], where the list includes the names
of the unconvertible ops. | torch/onnx/utils.py | unconvertible_ops | TristanLaan/pytorch | 0 | python | def unconvertible_ops(model, args, training=_C_onnx.TrainingMode.EVAL, opset_version=None):
'\n Converts the model with operator_export_type set to\n torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of\n all the ops that are not supported/implemented by the exporter.\n\n Args:\n model: Same as corresponding arg to torch.onnx.export.\n args: Same as corresponding arg to torch.onnx.export.\n training: Same as corresponding arg to torch.onnx.export.\n opset_version: Same as corresponding arg to torch.onnx.export.\n\n Returns:\n Tuple[torch._C.Graph, List[str]], where the list includes the names\n of the unconvertible ops.\n '
opset_version = (opset_version or _constants.onnx_default_opset)
symbolic_helper._set_opset_version(opset_version)
with exporter_context(model, training, False):
args = _decide_input_format(model, args)
(graph, params_dict, torch_out) = _model_to_graph(model, args, operator_export_type=_C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
unsupported_ops = list()
supported_namespaces = ('onnx', 'prim', 'quantized')
for node in graph.nodes():
if (node.kind().split(':')[0] not in supported_namespaces):
unsupported_ops.append(node.kind())
return (graph, unsupported_ops) | def unconvertible_ops(model, args, training=_C_onnx.TrainingMode.EVAL, opset_version=None):
'\n Converts the model with operator_export_type set to\n torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of\n all the ops that are not supported/implemented by the exporter.\n\n Args:\n model: Same as corresponding arg to torch.onnx.export.\n args: Same as corresponding arg to torch.onnx.export.\n training: Same as corresponding arg to torch.onnx.export.\n opset_version: Same as corresponding arg to torch.onnx.export.\n\n Returns:\n Tuple[torch._C.Graph, List[str]], where the list includes the names\n of the unconvertible ops.\n '
opset_version = (opset_version or _constants.onnx_default_opset)
symbolic_helper._set_opset_version(opset_version)
with exporter_context(model, training, False):
args = _decide_input_format(model, args)
(graph, params_dict, torch_out) = _model_to_graph(model, args, operator_export_type=_C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
unsupported_ops = list()
supported_namespaces = ('onnx', 'prim', 'quantized')
for node in graph.nodes():
if (node.kind().split(':')[0] not in supported_namespaces):
unsupported_ops.append(node.kind())
return (graph, unsupported_ops)<|docstring|>Converts the model with operator_export_type set to
torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH once in order to get a list of
all the ops that are not supported/implemented by the exporter.
Args:
model: Same as corresponding arg to torch.onnx.export.
args: Same as corresponding arg to torch.onnx.export.
training: Same as corresponding arg to torch.onnx.export.
opset_version: Same as corresponding arg to torch.onnx.export.
Returns:
Tuple[torch._C.Graph, List[str]], where the list includes the names
of the unconvertible ops.<|endoftext|> |
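A hedged usage sketch for unconvertible_ops, assuming it is importable from torch.onnx.utils as in this version of the file; the model is a made-up example.
import torch
from torch.onnx import utils as onnx_utils

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
args = (torch.randn(1, 4),)
graph, unsupported = onnx_utils.unconvertible_ops(model, args, opset_version=13)
print(unsupported)  # empty when every op has a registered symbolic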
1f6233c13f1f8131b216da839bb474e7a0949ec2989739ead1a0262ecdfbb443 | def _run_symbolic_method(g, op_name, symbolic_fn, args):
'\n This trampoline function gets invoked for every symbolic method\n call from C++.\n '
try:
return symbolic_fn(g, *args)
except TypeError as e:
e.args = (f'{e.args[0]} (occurred when translating {op_name})',)
raise | This trampoline function gets invoked for every symbolic method
call from C++. | torch/onnx/utils.py | _run_symbolic_method | TristanLaan/pytorch | 0 | python | def _run_symbolic_method(g, op_name, symbolic_fn, args):
'\n This trampoline function gets invoked for every symbolic method\n call from C++.\n '
try:
return symbolic_fn(g, *args)
except TypeError as e:
e.args = (f'{e.args[0]} (occurred when translating {op_name})',)
raise | def _run_symbolic_method(g, op_name, symbolic_fn, args):
'\n This trampoline function gets invoked for every symbolic method\n call from C++.\n '
try:
return symbolic_fn(g, *args)
except TypeError as e:
e.args = (f'{e.args[0]} (occurred when translating {op_name})',)
raise<|docstring|>This trampoline function gets invoked for every symbolic method
call from C++.<|endoftext|> |
38ca1403031d500f8a17e070eb67ede22699e6d92abf5e6090bc7a8617d982e6 | def _find_symbolic_in_registry(domain: str, op_name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes) -> Optional[Callable]:
'Looks up for the symbolic function in the registry.\n\n Args:\n domain: The domain of the symbolic function.\n op_name: The name of the op.\n opset_version: Currect opset used.\n operator_export_type: An enum in _C_onnx.OperatorExportTypes.\n\n Returns:\n The symbolic function if found, None otherwise.\n '
if (not symbolic_registry.is_registered_op(op_name, domain, opset_version)):
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
return symbolic_registry.get_registered_op(op_name, domain, opset_version) | Looks up the symbolic function in the registry.
Args:
domain: The domain of the symbolic function.
op_name: The name of the op.
opset_version: Current opset used.
operator_export_type: An enum in _C_onnx.OperatorExportTypes.
Returns:
The symbolic function if found, None otherwise. | torch/onnx/utils.py | _find_symbolic_in_registry | TristanLaan/pytorch | 0 | python | def _find_symbolic_in_registry(domain: str, op_name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes) -> Optional[Callable]:
'Looks up for the symbolic function in the registry.\n\n Args:\n domain: The domain of the symbolic function.\n op_name: The name of the op.\n opset_version: Currect opset used.\n operator_export_type: An enum in _C_onnx.OperatorExportTypes.\n\n Returns:\n The symbolic function if found, None otherwise.\n '
if (not symbolic_registry.is_registered_op(op_name, domain, opset_version)):
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
return symbolic_registry.get_registered_op(op_name, domain, opset_version) | def _find_symbolic_in_registry(domain: str, op_name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes) -> Optional[Callable]:
'Looks up for the symbolic function in the registry.\n\n Args:\n domain: The domain of the symbolic function.\n op_name: The name of the op.\n opset_version: Currect opset used.\n operator_export_type: An enum in _C_onnx.OperatorExportTypes.\n\n Returns:\n The symbolic function if found, None otherwise.\n '
if (not symbolic_registry.is_registered_op(op_name, domain, opset_version)):
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
return symbolic_registry.get_registered_op(op_name, domain, opset_version)<|docstring|>Looks up the symbolic function in the registry.
Args:
domain: The domain of the symbolic function.
op_name: The name of the op.
opset_version: Current opset used.
operator_export_type: An enum in _C_onnx.OperatorExportTypes.
Returns:
The symbolic function if found, None otherwise.<|endoftext|> |
439dda34d6ca3d428c5ab7f4efd9c55f7941c66447065305cc822642ed09ed1c | def _need_symbolic_context(symbolic_fn) -> bool:
'Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`.'
params = tuple(inspect.signature(symbolic_fn).parameters.values())
if (not params):
return False
first_param_name = params[0].name
type_hints = typing.get_type_hints(symbolic_fn)
if (first_param_name not in type_hints):
return False
param_type = type_hints[first_param_name]
return issubclass(param_type, _exporter_states.SymbolicContext) | Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`. | torch/onnx/utils.py | _need_symbolic_context | TristanLaan/pytorch | 0 | python | def _need_symbolic_context(symbolic_fn) -> bool:
params = tuple(inspect.signature(symbolic_fn).parameters.values())
if (not params):
return False
first_param_name = params[0].name
type_hints = typing.get_type_hints(symbolic_fn)
if (first_param_name not in type_hints):
return False
param_type = type_hints[first_param_name]
return issubclass(param_type, _exporter_states.SymbolicContext) | def _need_symbolic_context(symbolic_fn) -> bool:
params = tuple(inspect.signature(symbolic_fn).parameters.values())
if (not params):
return False
first_param_name = params[0].name
type_hints = typing.get_type_hints(symbolic_fn)
if (first_param_name not in type_hints):
return False
param_type = type_hints[first_param_name]
return issubclass(param_type, _exporter_states.SymbolicContext)<|docstring|>Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`.<|endoftext|> |
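A sketch of the signature shape this check looks for: a symbolic function whose first parameter is annotated with torch.onnx.SymbolicContext (assuming that name is importable from torch.onnx, as the docstring above indicates); the op it emits is illustrative only.
import torch
from torch.onnx import SymbolicContext

def custom_symbolic(ctx: SymbolicContext, g, input):
    # ctx carries the original node, block, and env; g builds the ONNX graph.
    return g.op("Relu", input)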
3c8222f0bedaad05d730f9278202005be6d40be3d4fa0f8138bb4ab1cbd3db3f | def _run_symbolic_function(g: _C.Graph, block: _C.Block, n: _C.Node, inputs: Any, env: Dict[(_C.Value, _C.Value)], operator_export_type=_C_onnx.OperatorExportTypes.ONNX) -> Optional[Union[(_C.Value, Tuple[(_C.Value, ...)])]]:
'Runs a symbolic function.\n\n The function is used in C++ to export the node to ONNX.\n\n Returns:\n A single or a tuple of Values.\n None when the node gets cloned as is into the new graph.\n '
opset_version = GLOBALS.export_onnx_opset_version
symbolic_helper.is_caffe2_aten_fallback = symbolic_helper.is_caffe2_aten_fallback
if n.kind().endswith('_'):
ns_op_name = n.kind()[:(- 1)]
else:
ns_op_name = n.kind()
(ns, op_name) = ns_op_name.split('::')
try:
symbolic_registry.register_version('', opset_version)
if (symbolic_helper.is_caffe2_aten_fallback() and (opset_version == 9)):
symbolic_caffe2.register_quantized_ops('caffe2', opset_version)
if (ns == 'aten'):
domain = ''
elif ((ns == 'quantized') and symbolic_helper.is_caffe2_aten_fallback()):
domain = 'caffe2'
else:
domain = ns
if symbolic_registry.is_registered_op(op_name, domain, opset_version):
symbolic_fn = _find_symbolic_in_registry(domain, op_name, opset_version, operator_export_type)
assert (symbolic_fn is not None)
attrs = {k: n[k] for k in n.attributeNames()}
if _need_symbolic_context(symbolic_fn):
ctx = _exporter_states.SymbolicContext(_params_dict, env, n, block)
return symbolic_fn(ctx, g, *inputs, **attrs)
if (op_name == 'PythonOp'):
inputs = (n, *inputs)
return symbolic_fn(g, *inputs, **attrs)
elif (ns == 'onnx'):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.op(op_name, *inputs, **attrs, outputs=n.outputsSize())
elif _should_aten_fallback(ns, op_name, opset_version, operator_export_type):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
outputs = n.outputsSize()
attrs['outputs'] = outputs
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
else:
raise errors.UnsupportedOperatorError(domain, op_name, opset_version, symbolic_registry.get_op_supported_version(op_name, domain, opset_version))
except RuntimeError:
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
elif ((operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) and (not symbolic_helper.is_caffe2_aten_fallback())):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
raise
except TypeError as e:
e.args = (f'''{e.args[0]}
(Occurred when translating {op_name}).''',)
raise | Runs a symbolic function.
The function is used in C++ to export the node to ONNX.
Returns:
A single or a tuple of Values.
None when the node gets cloned as is into the new graph. | torch/onnx/utils.py | _run_symbolic_function | TristanLaan/pytorch | 0 | python | def _run_symbolic_function(g: _C.Graph, block: _C.Block, n: _C.Node, inputs: Any, env: Dict[(_C.Value, _C.Value)], operator_export_type=_C_onnx.OperatorExportTypes.ONNX) -> Optional[Union[(_C.Value, Tuple[(_C.Value, ...)])]]:
'Runs a symbolic function.\n\n The function is used in C++ to export the node to ONNX.\n\n Returns:\n A single or a tuple of Values.\n None when the node gets cloned as is into the new graph.\n '
opset_version = GLOBALS.export_onnx_opset_version
symbolic_helper.is_caffe2_aten_fallback = symbolic_helper.is_caffe2_aten_fallback
if n.kind().endswith('_'):
ns_op_name = n.kind()[:(- 1)]
else:
ns_op_name = n.kind()
(ns, op_name) = ns_op_name.split('::')
try:
symbolic_registry.register_version('', opset_version)
if (symbolic_helper.is_caffe2_aten_fallback() and (opset_version == 9)):
symbolic_caffe2.register_quantized_ops('caffe2', opset_version)
if (ns == 'aten'):
domain = ''
elif ((ns == 'quantized') and symbolic_helper.is_caffe2_aten_fallback()):
domain = 'caffe2'
else:
domain = ns
if symbolic_registry.is_registered_op(op_name, domain, opset_version):
symbolic_fn = _find_symbolic_in_registry(domain, op_name, opset_version, operator_export_type)
assert (symbolic_fn is not None)
attrs = {k: n[k] for k in n.attributeNames()}
if _need_symbolic_context(symbolic_fn):
ctx = _exporter_states.SymbolicContext(_params_dict, env, n, block)
return symbolic_fn(ctx, g, *inputs, **attrs)
if (op_name == 'PythonOp'):
inputs = (n, *inputs)
return symbolic_fn(g, *inputs, **attrs)
elif (ns == 'onnx'):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.op(op_name, *inputs, **attrs, outputs=n.outputsSize())
elif _should_aten_fallback(ns, op_name, opset_version, operator_export_type):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
outputs = n.outputsSize()
attrs['outputs'] = outputs
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
else:
raise errors.UnsupportedOperatorError(domain, op_name, opset_version, symbolic_registry.get_op_supported_version(op_name, domain, opset_version))
except RuntimeError:
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
elif ((operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) and (not symbolic_helper.is_caffe2_aten_fallback())):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
raise
except TypeError as e:
e.args = (f'''{e.args[0]}
(Occurred when translating {op_name}).''',)
raise | def _run_symbolic_function(g: _C.Graph, block: _C.Block, n: _C.Node, inputs: Any, env: Dict[(_C.Value, _C.Value)], operator_export_type=_C_onnx.OperatorExportTypes.ONNX) -> Optional[Union[(_C.Value, Tuple[(_C.Value, ...)])]]:
'Runs a symbolic function.\n\n The function is used in C++ to export the node to ONNX.\n\n Returns:\n A single or a tuple of Values.\n None when the node gets cloned as is into the new graph.\n '
opset_version = GLOBALS.export_onnx_opset_version
symbolic_helper.is_caffe2_aten_fallback = symbolic_helper.is_caffe2_aten_fallback
if n.kind().endswith('_'):
ns_op_name = n.kind()[:(- 1)]
else:
ns_op_name = n.kind()
(ns, op_name) = ns_op_name.split('::')
try:
symbolic_registry.register_version('', opset_version)
if (symbolic_helper.is_caffe2_aten_fallback() and (opset_version == 9)):
symbolic_caffe2.register_quantized_ops('caffe2', opset_version)
if (ns == 'aten'):
domain = ''
elif ((ns == 'quantized') and symbolic_helper.is_caffe2_aten_fallback()):
domain = 'caffe2'
else:
domain = ns
if symbolic_registry.is_registered_op(op_name, domain, opset_version):
symbolic_fn = _find_symbolic_in_registry(domain, op_name, opset_version, operator_export_type)
assert (symbolic_fn is not None)
attrs = {k: n[k] for k in n.attributeNames()}
if _need_symbolic_context(symbolic_fn):
ctx = _exporter_states.SymbolicContext(_params_dict, env, n, block)
return symbolic_fn(ctx, g, *inputs, **attrs)
if (op_name == 'PythonOp'):
inputs = (n, *inputs)
return symbolic_fn(g, *inputs, **attrs)
elif (ns == 'onnx'):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.op(op_name, *inputs, **attrs, outputs=n.outputsSize())
elif _should_aten_fallback(ns, op_name, opset_version, operator_export_type):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
outputs = n.outputsSize()
attrs['outputs'] = outputs
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
else:
raise errors.UnsupportedOperatorError(domain, op_name, opset_version, symbolic_registry.get_op_supported_version(op_name, domain, opset_version))
except RuntimeError:
if (operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH):
return None
elif ((operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) and (not symbolic_helper.is_caffe2_aten_fallback())):
attrs = {((k + '_') + n.kindOf(k)[0]): n[k] for k in n.attributeNames()}
return g.at(op_name, *inputs, overload_name=_get_aten_op_overload_name(n), **attrs)
raise
except TypeError as e:
        e.args = (f'{e.args[0]} \n (Occurred when translating {op_name}).',)
raise<|docstring|>Runs a symbolic function.
The function is used in C++ to export the node to ONNX.
Returns:
A single or a tuple of Values.
None when the node gets cloned as is into the new graph.<|endoftext|> |
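
A note on the `_run_symbolic_function` record above: the dispatcher resolves `ns::op_name`, looks the op up in `symbolic_registry`, and invokes whatever is registered as `symbolic_fn(g, *inputs, **attrs)`. A minimal sketch of a function matching that calling convention is shown below; the operator chosen ('Add') and the `parse_args` spec are illustrative assumptions, not taken from the record.

from torch.onnx import symbolic_helper

@symbolic_helper.parse_args('v', 'v')
def elementwise_add(g, self, other):
    # Both inputs are declared as plain graph Values ('v'); the function emits a
    # single ONNX Add node into the in-progress graph g and returns its output Value.
    return g.op('Add', self, other)
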
3e2156b707239fbc5b83a71f404910df2edd3deea9d2c49544a83d829652765e | def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
'Registers a symbolic function for a custom operator.\n\n When the user registers symbolic for custom/contrib ops,\n it is highly recommended to add shape inference for that operator via setType API,\n otherwise the exported graph may have incorrect shape inference in some extreme cases.\n An example of setType is `test_aten_embedding_2` in `test_operators.py`.\n '
(ns, op_name) = get_ns_op_name_from_custom_op(symbolic_name)
for version in itertools.chain(_constants.onnx_stable_opsets, [_constants.onnx_main_opset]):
if (version >= opset_version):
symbolic_registry.register_op(op_name, symbolic_fn, ns, version) | Registers a symbolic function for a custom operator.
When the user registers symbolic for custom/contrib ops,
it is highly recommended to add shape inference for that operator via setType API,
otherwise the exported graph may have incorrect shape inference in some extreme cases.
An example of setType is `test_aten_embedding_2` in `test_operators.py`. | torch/onnx/utils.py | register_custom_op_symbolic | TristanLaan/pytorch | 0 | python | def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
'Registers a symbolic function for a custom operator.\n\n When the user registers symbolic for custom/contrib ops,\n it is highly recommended to add shape inference for that operator via setType API,\n otherwise the exported graph may have incorrect shape inference in some extreme cases.\n An example of setType is `test_aten_embedding_2` in `test_operators.py`.\n '
(ns, op_name) = get_ns_op_name_from_custom_op(symbolic_name)
for version in itertools.chain(_constants.onnx_stable_opsets, [_constants.onnx_main_opset]):
if (version >= opset_version):
symbolic_registry.register_op(op_name, symbolic_fn, ns, version) | def register_custom_op_symbolic(symbolic_name, symbolic_fn, opset_version):
'Registers a symbolic function for a custom operator.\n\n When the user registers symbolic for custom/contrib ops,\n it is highly recommended to add shape inference for that operator via setType API,\n otherwise the exported graph may have incorrect shape inference in some extreme cases.\n An example of setType is `test_aten_embedding_2` in `test_operators.py`.\n '
(ns, op_name) = get_ns_op_name_from_custom_op(symbolic_name)
for version in itertools.chain(_constants.onnx_stable_opsets, [_constants.onnx_main_opset]):
if (version >= opset_version):
symbolic_registry.register_op(op_name, symbolic_fn, ns, version)<|docstring|>Registers a symbolic function for a custom operator.
When the user registers symbolic for custom/contrib ops,
it is highly recommended to add shape inference for that operator via setType API,
otherwise the exported graph may have incorrect shape inference in some extreme cases.
An example of setType is `test_aten_embedding_2` in `test_operators.py`.<|endoftext|> |
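
Hedged usage sketch for the `register_custom_op_symbolic` record above. The custom op name `mylib::linear_relu`, the symbolic body, and the choice of opset 9 are assumptions for illustration only; `setType` is used to attach type information to the output, as the docstring recommends (here it simply copies the input's type, which assumes the output shares its shape and dtype).

import torch

def linear_relu_symbolic(g, input, weight):
    # Emit a node in a custom ONNX domain; 'mylib::LinearRelu' is a hypothetical op.
    out = g.op('mylib::LinearRelu', input, weight)
    # Attach shape/dtype information so downstream shape inference is not left blank;
    # copying the input type assumes the output has the same shape and dtype.
    out.setType(input.type())
    return out

torch.onnx.register_custom_op_symbolic('mylib::linear_relu', linear_relu_symbolic, opset_version=9)
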
6a1c0167a073e0046d9f22b0dcbd542d4622fa91a96613d874b01429353614e1 | def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
    'Ensures dynamic axes argument follows the expected format.'
if (len(dynamic_axes) == 0):
return
if hasattr(model, 'graph'):
if ((input_names is None) or (len(input_names) == 0)):
input_names = [x.debugName() for x in model.graph.inputs()]
if ((output_names is None) or (len(output_names) == 0)):
output_names = [y.debugName() for y in model.graph.outputs()]
valid_names = set(((input_names or []) + (output_names or [])))
for (key, value) in dynamic_axes.items():
if (key not in valid_names):
warnings.warn(f'Provided key {key} for dynamic axes is not a valid input/output name')
if isinstance(value, list):
warnings.warn(f'No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input {key}')
value_dict = {}
for (i, x) in enumerate(value):
if (not isinstance(x, int)):
raise ValueError('The type of axis index is expected to be an integer')
if (x in value_dict):
warnings.warn(f'Duplicate dynamic axis index {x} was provided for input {key}.')
else:
value_dict[x] = ((str(key) + '_dynamic_axes_') + str((i + 1)))
            dynamic_axes[key] = value_dict | Ensures dynamic axes argument follows the expected format. | torch/onnx/utils.py | _validate_dynamic_axes | TristanLaan/pytorch | 0 | python | def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
if (len(dynamic_axes) == 0):
return
if hasattr(model, 'graph'):
if ((input_names is None) or (len(input_names) == 0)):
input_names = [x.debugName() for x in model.graph.inputs()]
if ((output_names is None) or (len(output_names) == 0)):
output_names = [y.debugName() for y in model.graph.outputs()]
valid_names = set(((input_names or []) + (output_names or [])))
for (key, value) in dynamic_axes.items():
if (key not in valid_names):
warnings.warn(f'Provided key {key} for dynamic axes is not a valid input/output name')
if isinstance(value, list):
warnings.warn(f'No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input {key}')
value_dict = {}
for (i, x) in enumerate(value):
if (not isinstance(x, int)):
raise ValueError('The type of axis index is expected to be an integer')
if (x in value_dict):
warnings.warn(f'Duplicate dynamic axis index {x} was provided for input {key}.')
else:
value_dict[x] = ((str(key) + '_dynamic_axes_') + str((i + 1)))
dynamic_axes[key] = value_dict | def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
if (len(dynamic_axes) == 0):
return
if hasattr(model, 'graph'):
if ((input_names is None) or (len(input_names) == 0)):
input_names = [x.debugName() for x in model.graph.inputs()]
if ((output_names is None) or (len(output_names) == 0)):
output_names = [y.debugName() for y in model.graph.outputs()]
valid_names = set(((input_names or []) + (output_names or [])))
for (key, value) in dynamic_axes.items():
if (key not in valid_names):
warnings.warn(f'Provided key {key} for dynamic axes is not a valid input/output name')
if isinstance(value, list):
warnings.warn(f'No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input {key}')
value_dict = {}
for (i, x) in enumerate(value):
if (not isinstance(x, int)):
raise ValueError('The type of axis index is expected to be an integer')
if (x in value_dict):
warnings.warn(f'Duplicate dynamic axis index {x} was provided for input {key}.')
else:
value_dict[x] = ((str(key) + '_dynamic_axes_') + str((i + 1)))
            dynamic_axes[key] = value_dict<|docstring|>Ensures dynamic axes argument follows the expected format.<|endoftext|>
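
Hedged sketch of the two `dynamic_axes` forms the `_validate_dynamic_axes` record above accepts when passed through `torch.onnx.export`: a dict mapping axis index to a name, or a bare list of axis indices for which names are generated automatically (with a warning). The model, tensor shapes, and file name are assumptions for illustration.

import torch

model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy_input = torch.randn(1, 3, 32, 32)

dynamic_axes = {
    'input': {0: 'batch', 2: 'height', 3: 'width'},  # dict form: explicit axis names
    'output': [0],  # list form: names such as 'output_dynamic_axes_1' are auto-generated
}

torch.onnx.export(
    model,
    dummy_input,
    'conv.onnx',
    input_names=['input'],
    output_names=['output'],
    dynamic_axes=dynamic_axes,
)
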
a8629ea98abd09c708cd028336e8291c8d49bfe88c8fdcbf062dbfd26ab1d6e5 | def show_mesh_ipython_nb(self):
    '\n Show the mesh by plotting the mesh points.\n matplotlib.pyplot.plot is not used\n because this method is used to display the image inline in an IPython notebook.\n '
list_values_y = ([0] * len(self.mesh))
plt.scatter(self.mesh, list_values_y) | Show the mesh by plotting the mesh points.
matplotlib.pyplot.plot is not used
    because this method is used to display the image inline in an IPython notebook. | shocktube1dcalc/generator_mesh.py | show_mesh_ipython_nb | yezhengkai/shocktube1dcalc | 2 | python | def show_mesh_ipython_nb(self):
'\n Show the mesh by plotting the mesh points.\n matplotlib.pyplot.plot is not used\n because this method is used image inline ipython notebook.\n '
list_values_y = ([0] * len(self.mesh))
plt.scatter(self.mesh, list_values_y) | def show_mesh_ipython_nb(self):
'\n Show the mesh by plotting the mesh points.\n matplotlib.pyplot.plot is not used\n because this method is used image inline ipython notebook.\n '
list_values_y = ([0] * len(self.mesh))
plt.scatter(self.mesh, list_values_y)<|docstring|>Show the mesh by plotting the mesh points.
matplotlib.pyplot.plot is not used
    because this method is used to display the image inline in an IPython notebook.<|endoftext|>
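
Standalone sketch of what the `show_mesh_ipython_nb` record above does: scatter the mesh points against zeros so the figure renders inline in an IPython notebook (typically with the `%matplotlib inline` backend enabled). The mesh values here are assumed for illustration; only the `plt.scatter` call mirrors the record.

import matplotlib.pyplot as plt

mesh = [0.0, 0.1, 0.25, 0.45, 0.7, 1.0]  # assumed example mesh points
plt.scatter(mesh, [0] * len(mesh))  # plot each mesh point at y = 0, as the method does
plt.show()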