problem_id (string, 18-22 chars) | source (string, 1 value) | task_type (string, 1 value) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_10162 | rasdani/github-patches | git_diff | pytorch__text-76 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consistency with sorting: `sort=True`
**Problem:**
```
train_iter, dev_iter, test_iter = data.BucketIterator.splits(
(train, dev, test),
batch_sizes=(32, 256, 256),
sort_key=lambda x: len(x.input),
sort=True,
device=-1) # Use CPU
```
If `sort=True` and `train=True`, then the `train_iter` batches are shuffled. This behavior is unexpected.
**Cause:**
Because by default `self.shuffle=True` is `train=True`. Then https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L113 `shuffle` overrides `sort`.
**Possible Solution:**
`sort=True` should override `shuffle=None and train=True`.
</issue>
<code>
[start of torchtext/data/iterator.py]
1 import math
2 import random
3 from contextlib import contextmanager
4 from copy import deepcopy
5
6 from .batch import Batch
7 from .dataset import Dataset
8
9
10 class RandomShuffler(object):
11 """Use random functions while keeping track of the random state to make it
12 reproducible and deterministic."""
13
14 def __init__(self, random_state=None):
15 self._random_state = random_state
16 if self._random_state is None:
17 self._random_state = random.getstate()
18
19 @contextmanager
20 def use_internal_state(self):
21 """Use a specific RNG state."""
22 old_state = random.getstate()
23 random.setstate(self._random_state)
24 yield
25 self._random_state = random.getstate()
26 random.setstate(old_state)
27
28 @property
29 def random_state(self):
30 return deepcopy(self._random_state)
31
32 @random_state.setter
33 def random_state(self, s):
34 self._random_state = s
35
36 def __call__(self, data):
37 """Shuffle and return a new list."""
38 with self.use_internal_state():
39 return random.sample(data, len(data))
40
41
42 class Iterator(object):
43 """Defines an iterator that loads batches of data from a Dataset.
44
45 Attributes:
46 dataset: The Dataset object to load Examples from.
47 batch_size: Batch size.
48 batch_size_fn: Function of three arguments (new example to add, current
49 count of examples in the batch, and current effective batch size)
50 that returns the new effective batch size resulting from adding
51 that example to a batch. This is useful for dynamic batching, where
52 this function would add to the current effective batch size the
53 number of tokens in the new example.
54 sort_key: A key to use for sorting examples in order to batch together
55 examples with similar lengths and minimize padding. The sort_key
56 provided to the Iterator constructor overrides the sort_key
57 attribute of the Dataset, or defers to it if None.
58 train: Whether the iterator represents a train set.
59 repeat: Whether to repeat the iterator for multiple epochs.
60 shuffle: Whether to shuffle examples between epochs.
61 sort: Whether to sort examples according to self.sort_key.
62 Note that repeat, shuffle, and sort default to train, train, and
63 (not train).
64 device: Device to create batches on. Use -1 for CPU and None for the
65 currently active GPU device.
66 """
67
68 def __init__(self, dataset, batch_size, sort_key=None, device=None,
69 batch_size_fn=lambda new, count, sofar: count, train=True,
70 repeat=None, shuffle=None, sort=None):
71 self.batch_size, self.train, self.dataset = batch_size, train, dataset
72 self.batch_size_fn = batch_size_fn
73 self.iterations = 0
74 self.repeat = train if repeat is None else repeat
75 self.shuffle = train if shuffle is None else shuffle
76 self.sort = not train if sort is None else sort
77 if sort_key is None:
78 self.sort_key = dataset.sort_key
79 else:
80 self.sort_key = sort_key
81 self.device = device
82
83 self.random_shuffler = RandomShuffler()
84
85 # For state loading/saving only
86 self._iterations_this_epoch = 0
87 self._random_state_this_epoch = None
88 self._restored_from_state = False
89
90 @classmethod
91 def splits(cls, datasets, batch_sizes=None, **kwargs):
92 """Create Iterator objects for multiple splits of a dataset.
93
94 Arguments:
95 datasets: Tuple of Dataset objects corresponding to the splits. The
96 first such object should be the train set.
97 batch_sizes: Tuple of batch sizes to use for the different splits,
98 or None to use the same batch_size for all splits.
99 Remaining keyword arguments: Passed to the constructor of the
100 iterator class being used.
101 """
102 if batch_sizes is None:
103 batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
104 ret = []
105 for i in range(len(datasets)):
106 train = i == 0
107 ret.append(cls(
108 datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))
109 return tuple(ret)
110
111 def data(self):
112 """Return the examples in the dataset in order, sorted, or shuffled."""
113 if self.shuffle:
114 xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
115 elif self.sort:
116 xs = sorted(self.dataset, key=self.sort_key)
117 else:
118 xs = self.dataset
119 return xs
120
121 def init_epoch(self):
122 """Set up the batch generator for a new epoch."""
123
124 if self._restored_from_state:
125 self.random_shuffler.random_state = self._random_state_this_epoch
126 else:
127 self._random_state_this_epoch = self.random_shuffler.random_state
128
129 self.create_batches()
130
131 if self._restored_from_state:
132 self._restored_from_state = False
133 else:
134 self._iterations_this_epoch = 0
135
136 if not self.repeat:
137 self.iterations = 0
138
139 def create_batches(self):
140 self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)
141
142 @property
143 def epoch(self):
144 return self.iterations / len(self)
145
146 def __len__(self):
147 return math.ceil(len(self.dataset) / self.batch_size)
148
149 def __iter__(self):
150 while True:
151 self.init_epoch()
152 for idx, minibatch in enumerate(self.batches):
153 # fast-forward if loaded from state
154 if self._iterations_this_epoch > idx:
155 continue
156 self.iterations += 1
157 self._iterations_this_epoch += 1
158 yield Batch(minibatch, self.dataset, self.device,
159 self.train)
160 if not self.repeat:
161 raise StopIteration
162
163 def state_dict(self):
164 return {
165 "iterations": self.iterations,
166 "iterations_this_epoch": self._iterations_this_epoch,
167 "random_state_this_epoch": self._random_state_this_epoch}
168
169 def load_state_dict(self, state_dict):
170 self.iterations = state_dict["iterations"]
171 self._iterations_this_epoch = state_dict["iterations_this_epoch"]
172 self._random_state_this_epoch = state_dict["random_state_this_epoch"]
173 self._restored_from_state = True
174
175
176 class BPTTIterator(Iterator):
177 """Defines an iterator for language modeling tasks that use BPTT.
178
179 Provides contiguous streams of examples together with targets that are
180 one timestep further forward, for language modeling training with
181 backpropagation through time (BPTT). Expects a Dataset with a single
182 example and a single field called 'text' and produces Batches with text and
183 target attributes.
184
185 Attributes:
186 dataset: The Dataset object to load Examples from.
187 batch_size: Batch size.
188 bptt_len: Length of sequences for backpropagation through time.
189 sort_key: A key to use for sorting examples in order to batch together
190 examples with similar lengths and minimize padding. The sort_key
191 provided to the Iterator constructor overrides the sort_key
192 attribute of the Dataset, or defers to it if None.
193 train: Whether the iterator represents a train set.
194 repeat: Whether to repeat the iterator for multiple epochs.
195 shuffle: Whether to shuffle examples between epochs.
196 sort: Whether to sort examples according to self.sort_key.
197 Note that repeat, shuffle, and sort default to train, train, and
198 (not train).
199 device: Device to create batches on. Use -1 for CPU and None for the
200 currently active GPU device.
201 """
202
203 def __init__(self, dataset, batch_size, bptt_len, **kwargs):
204 self.bptt_len = bptt_len
205 super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)
206
207 def __len__(self):
208 return math.ceil(len(self.dataset[0].text) /
209 (self.batch_size * self.bptt_len))
210
211 def __iter__(self):
212 text = self.dataset[0].text
213 TEXT = self.dataset.fields['text']
214 TEXT.eos_token = None
215 text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *
216 self.batch_size - len(text)))
217 data = TEXT.numericalize(
218 [text], device=self.device, train=self.train)
219 data = data.view(self.batch_size, -1).t().contiguous()
220 dataset = Dataset(examples=self.dataset.examples, fields=[
221 ('text', TEXT), ('target', TEXT)])
222 while True:
223 for i in range(0, len(self) * self.bptt_len, self.bptt_len):
224 seq_len = min(self.bptt_len, len(data) - 1 - i)
225 yield Batch.fromvars(
226 dataset, self.batch_size, train=self.train,
227 text=data[i:i + seq_len],
228 target=data[i + 1:i + 1 + seq_len])
229 if not self.repeat:
230 raise StopIteration
231
232
233 class BucketIterator(Iterator):
234 """Defines an iterator that batches examples of similar lengths together.
235
236 Minimizes amount of padding needed while producing freshly shuffled
237 batches for each new epoch. See pool for the bucketing procedure used.
238 """
239
240 def create_batches(self):
241 if self.sort:
242 self.batches = batch(self.data(), self.batch_size,
243 self.batch_size_fn)
244 else:
245 self.batches = pool(self.data(), self.batch_size,
246 self.sort_key, self.batch_size_fn,
247 random_shuffler=self.random_shuffler)
248
249
250 def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):
251 """Yield elements from data in chunks of batch_size."""
252 minibatch, size_so_far = [], 0
253 for ex in data:
254 minibatch.append(ex)
255 size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
256 if size_so_far == batch_size:
257 yield minibatch
258 minibatch, size_so_far = [], 0
259 elif size_so_far > batch_size:
260 yield minibatch[:-1]
261 minibatch, size_so_far = minibatch[:-1], batch_size_fn(ex, 1, 0)
262 if minibatch:
263 yield minibatch
264
265
266 def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
267 random_shuffler=None):
268 """Sort within buckets, then batch, then shuffle batches.
269
270 Partitions data into chunks of size 100*batch_size, sorts examples within
271 each chunk using sort_key, then batch these examples and shuffle the
272 batches.
273 """
274 if random_shuffler is None:
275 random_shuffler = random.shuffle
276 for p in batch(data, batch_size * 100, batch_size_fn):
277 p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)
278 for b in random_shuffler(list(p_batch)):
279 yield b
280
[end of torchtext/data/iterator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py
--- a/torchtext/data/iterator.py
+++ b/torchtext/data/iterator.py
@@ -110,10 +110,10 @@
def data(self):
"""Return the examples in the dataset in order, sorted, or shuffled."""
- if self.shuffle:
- xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
- elif self.sort:
+ if self.sort:
xs = sorted(self.dataset, key=self.sort_key)
+ elif self.shuffle:
+ xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
else:
xs = self.dataset
return xs
| {"golden_diff": "diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py\n--- a/torchtext/data/iterator.py\n+++ b/torchtext/data/iterator.py\n@@ -110,10 +110,10 @@\n \n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n- if self.shuffle:\n- xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n- elif self.sort:\n+ if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n+ elif self.shuffle:\n+ xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n", "issue": "Consistency with sorting: `sort=True`\n**Problem:**\r\n```\r\n train_iter, dev_iter, test_iter = data.BucketIterator.splits(\r\n (train, dev, test),\r\n batch_sizes=(32, 256, 256),\r\n sort_key=lambda x: len(x.input),\r\n sort=True,\r\n device=-1) # Use CPU\r\n```\r\n\r\nIf `sort=True` and `train=True`, then the `train_iter` batches are shuffled. This behavior is unexpected.\r\n\r\n**Cause:**\r\nBecause by default `self.shuffle=True` is `train=True`. Then https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L113 `shuffle` overrides `sort`.\r\n\r\n**Possible Solution:**\r\n`sort=True` should override `shuffle=None and train=True`.\n", "before_files": [{"content": "import math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. 
Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n elif self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of 
examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * (math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[:-1], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}]} | 3,822 | 173 |
gh_patches_debug_2165 | rasdani/github-patches | git_diff | huggingface__huggingface_hub-234 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when creating a repository
This error happens on huggingface_hub version 0.0.14 (current version) when creating a new dataset repository
```bash
(py38) bash-3.2$ huggingface-cli repo create --type dataset codeparrot-train
git version 2.29.2
git-lfs/2.13.3 (GitHub; darwin amd64; go 1.16.2; git a5e65851)
Traceback (most recent call last):
File "/Users/thomwolf/miniconda2/envs/py38/bin/huggingface-cli", line 8, in <module>
sys.exit(main())
File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/huggingface_cli.py", line 41, in main
service.run()
File "/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/user.py", line 228, in run
user, _ = self._api.whoami(token)
ValueError: too many values to unpack (expected 2)
```
</issue>
<code>
[start of src/huggingface_hub/commands/user.py]
1 # Copyright 2020 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import subprocess
16 from argparse import ArgumentParser
17 from getpass import getpass
18 from typing import List, Union
19
20 from huggingface_hub.commands import BaseHuggingfaceCLICommand
21 from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
22 from huggingface_hub.hf_api import HfApi, HfFolder
23 from requests.exceptions import HTTPError
24
25
26 class UserCommands(BaseHuggingfaceCLICommand):
27 @staticmethod
28 def register_subcommand(parser: ArgumentParser):
29 login_parser = parser.add_parser(
30 "login", help="Log in using the same credentials as on huggingface.co"
31 )
32 login_parser.set_defaults(func=lambda args: LoginCommand(args))
33 whoami_parser = parser.add_parser(
34 "whoami", help="Find out which huggingface.co account you are logged in as."
35 )
36 whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
37 logout_parser = parser.add_parser("logout", help="Log out")
38 logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
39
40 # new system: git-based repo system
41 repo_parser = parser.add_parser(
42 "repo",
43 help="{create, ls-files} Commands to interact with your huggingface.co repos.",
44 )
45 repo_subparsers = repo_parser.add_subparsers(
46 help="huggingface.co repos related commands"
47 )
48 ls_parser = repo_subparsers.add_parser(
49 "ls-files", help="List all your files on huggingface.co"
50 )
51 ls_parser.add_argument(
52 "--organization", type=str, help="Optional: organization namespace."
53 )
54 ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))
55 repo_create_parser = repo_subparsers.add_parser(
56 "create", help="Create a new repo on huggingface.co"
57 )
58 repo_create_parser.add_argument(
59 "name",
60 type=str,
61 help="Name for your repo. Will be namespaced under your username to build the repo id.",
62 )
63 repo_create_parser.add_argument(
64 "--type",
65 type=str,
66 help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
67 )
68 repo_create_parser.add_argument(
69 "--organization", type=str, help="Optional: organization namespace."
70 )
71 repo_create_parser.add_argument(
72 "-y",
73 "--yes",
74 action="store_true",
75 help="Optional: answer Yes to the prompt",
76 )
77 repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
78
79
80 class ANSI:
81 """
82 Helper for en.wikipedia.org/wiki/ANSI_escape_code
83 """
84
85 _bold = "\u001b[1m"
86 _red = "\u001b[31m"
87 _gray = "\u001b[90m"
88 _reset = "\u001b[0m"
89
90 @classmethod
91 def bold(cls, s):
92 return "{}{}{}".format(cls._bold, s, cls._reset)
93
94 @classmethod
95 def red(cls, s):
96 return "{}{}{}".format(cls._bold + cls._red, s, cls._reset)
97
98 @classmethod
99 def gray(cls, s):
100 return "{}{}{}".format(cls._gray, s, cls._reset)
101
102
103 def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
104 """
105 Inspired by:
106
107 - stackoverflow.com/a/8356620/593036
108 - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
109 """
110 col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
111 row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
112 lines = []
113 lines.append(row_format.format(*headers))
114 lines.append(row_format.format(*["-" * w for w in col_widths]))
115 for row in rows:
116 lines.append(row_format.format(*row))
117 return "\n".join(lines)
118
119
120 class BaseUserCommand:
121 def __init__(self, args):
122 self.args = args
123 self._api = HfApi()
124
125
126 class LoginCommand(BaseUserCommand):
127 def run(self):
128 print( # docstyle-ignore
129 """
130 _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
131 _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
132 _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
133 _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
134 _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
135
136 """
137 )
138 username = input("Username: ")
139 password = getpass()
140 try:
141 token = self._api.login(username, password)
142 except HTTPError as e:
143 # probably invalid credentials, display error message.
144 print(e)
145 print(ANSI.red(e.response.text))
146 exit(1)
147 HfFolder.save_token(token)
148 print("Login successful")
149 print("Your token has been saved to", HfFolder.path_token)
150
151
152 class WhoamiCommand(BaseUserCommand):
153 def run(self):
154 token = HfFolder.get_token()
155 if token is None:
156 print("Not logged in")
157 exit()
158 try:
159 info = self._api.whoami(token)
160 print(info["name"])
161 orgs = [org["name"] for org in info["orgs"]]
162 if orgs:
163 print(ANSI.bold("orgs: "), ",".join(orgs))
164 except HTTPError as e:
165 print(e)
166 print(ANSI.red(e.response.text))
167 exit(1)
168
169
170 class LogoutCommand(BaseUserCommand):
171 def run(self):
172 token = HfFolder.get_token()
173 if token is None:
174 print("Not logged in")
175 exit()
176 HfFolder.delete_token()
177 self._api.logout(token)
178 print("Successfully logged out.")
179
180
181 class ListReposObjsCommand(BaseUserCommand):
182 def run(self):
183 token = HfFolder.get_token()
184 if token is None:
185 print("Not logged in")
186 exit(1)
187 try:
188 objs = self._api.list_repos_objs(token, organization=self.args.organization)
189 except HTTPError as e:
190 print(e)
191 print(ANSI.red(e.response.text))
192 exit(1)
193 if len(objs) == 0:
194 print("No shared file yet")
195 exit()
196 rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]
197 print(
198 tabulate(rows, headers=["Filename", "LastModified", "Commit-Sha", "Size"])
199 )
200
201
202 class RepoCreateCommand(BaseUserCommand):
203 def run(self):
204 token = HfFolder.get_token()
205 if token is None:
206 print("Not logged in")
207 exit(1)
208 try:
209 stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
210 print(ANSI.gray(stdout.strip()))
211 except FileNotFoundError:
212 print("Looks like you do not have git installed, please install.")
213
214 try:
215 stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
216 print(ANSI.gray(stdout.strip()))
217 except FileNotFoundError:
218 print(
219 ANSI.red(
220 "Looks like you do not have git-lfs installed, please install."
221 " You can install from https://git-lfs.github.com/."
222 " Then run `git lfs install` (you only have to do this once)."
223 )
224 )
225 print("")
226
227 user, _ = self._api.whoami(token)
228 namespace = (
229 self.args.organization if self.args.organization is not None else user
230 )
231
232 repo_id = f"{namespace}/{self.args.name}"
233
234 if self.args.type not in REPO_TYPES:
235 print("Invalid repo --type")
236 exit(1)
237
238 if self.args.type in REPO_TYPES_URL_PREFIXES:
239 repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
240
241 print("You are about to create {}".format(ANSI.bold(repo_id)))
242
243 if not self.args.yes:
244 choice = input("Proceed? [Y/n] ").lower()
245 if not (choice == "" or choice == "y" or choice == "yes"):
246 print("Abort")
247 exit()
248 try:
249 url = self._api.create_repo(
250 token,
251 name=self.args.name,
252 organization=self.args.organization,
253 repo_type=self.args.type,
254 )
255 except HTTPError as e:
256 print(e)
257 print(ANSI.red(e.response.text))
258 exit(1)
259 print("\nYour repo now lives at:")
260 print(" {}".format(ANSI.bold(url)))
261 print(
262 "\nYou can clone it locally with the command below,"
263 " and commit/push as usual."
264 )
265 print(f"\n git clone {url}")
266 print("")
267
[end of src/huggingface_hub/commands/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/huggingface_hub/commands/user.py b/src/huggingface_hub/commands/user.py
--- a/src/huggingface_hub/commands/user.py
+++ b/src/huggingface_hub/commands/user.py
@@ -224,7 +224,7 @@
)
print("")
- user, _ = self._api.whoami(token)
+ user = self._api.whoami(token)["name"]
namespace = (
self.args.organization if self.args.organization is not None else user
)
| {"golden_diff": "diff --git a/src/huggingface_hub/commands/user.py b/src/huggingface_hub/commands/user.py\n--- a/src/huggingface_hub/commands/user.py\n+++ b/src/huggingface_hub/commands/user.py\n@@ -224,7 +224,7 @@\n )\n print(\"\")\n \n- user, _ = self._api.whoami(token)\n+ user = self._api.whoami(token)[\"name\"]\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n", "issue": "Error when creating a repository\nThis error happens on huggingface_hub version 0.0.14 (current version) when creating a new dataset repository\r\n\r\n```bash\r\n(py38) bash-3.2$ huggingface-cli repo create --type dataset codeparrot-train\r\ngit version 2.29.2\r\ngit-lfs/2.13.3 (GitHub; darwin amd64; go 1.16.2; git a5e65851)\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/thomwolf/miniconda2/envs/py38/bin/huggingface-cli\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/huggingface_cli.py\", line 41, in main\r\n service.run()\r\n File \"/Users/thomwolf/miniconda2/envs/py38/lib/python3.8/site-packages/huggingface_hub/commands/user.py\", line 228, in run\r\n user, _ = self._api.whoami(token)\r\nValueError: too many values to unpack (expected 2)\r\n```\n", "before_files": [{"content": "# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom huggingface_hub.commands import BaseHuggingfaceCLICommand\nfrom huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES\nfrom huggingface_hub.hf_api import HfApi, HfFolder\nfrom requests.exceptions import HTTPError\n\n\nclass UserCommands(BaseHuggingfaceCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\n \"login\", help=\"Log in using the same credentials as on huggingface.co\"\n )\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\n \"whoami\", help=\"Find out which huggingface.co account you are logged in as.\"\n )\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n\n # new system: git-based repo system\n repo_parser = parser.add_parser(\n \"repo\",\n help=\"{create, ls-files} Commands to interact with your huggingface.co repos.\",\n )\n repo_subparsers = repo_parser.add_subparsers(\n help=\"huggingface.co repos related commands\"\n )\n ls_parser = repo_subparsers.add_parser(\n \"ls-files\", help=\"List all your files on huggingface.co\"\n )\n ls_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n ls_parser.set_defaults(func=lambda args: ListReposObjsCommand(args))\n repo_create_parser = repo_subparsers.add_parser(\n \"create\", 
help=\"Create a new repo on huggingface.co\"\n )\n repo_create_parser.add_argument(\n \"name\",\n type=str,\n help=\"Name for your repo. Will be namespaced under your username to build the repo id.\",\n )\n repo_create_parser.add_argument(\n \"--type\",\n type=str,\n help='Optional: repo_type: set to \"dataset\" or \"space\" if creating a dataset or space, default is model.',\n )\n repo_create_parser.add_argument(\n \"--organization\", type=str, help=\"Optional: organization namespace.\"\n )\n repo_create_parser.add_argument(\n \"-y\",\n \"--yes\",\n action=\"store_true\",\n help=\"Optional: answer Yes to the prompt\",\n )\n repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _gray = \"\\u001b[90m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n @classmethod\n def gray(cls, s):\n return \"{}{}{}\".format(cls._gray, s, cls._reset)\n\n\ndef tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n\n - stackoverflow.com/a/8356620/593036\n - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print( # docstyle-ignore\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n info = self._api.whoami(token)\n print(info[\"name\"])\n orgs = [org[\"name\"] for org in info[\"orgs\"]]\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListReposObjsCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_repos_objs(token, 
organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n exit()\n rows = [[obj.filename, obj.lastModified, obj.commit, obj.size] for obj in objs]\n print(\n tabulate(rows, headers=[\"Filename\", \"LastModified\", \"Commit-Sha\", \"Size\"])\n )\n\n\nclass RepoCreateCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n stdout = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\"Looks like you do not have git installed, please install.\")\n\n try:\n stdout = subprocess.check_output([\"git-lfs\", \"--version\"]).decode(\"utf-8\")\n print(ANSI.gray(stdout.strip()))\n except FileNotFoundError:\n print(\n ANSI.red(\n \"Looks like you do not have git-lfs installed, please install.\"\n \" You can install from https://git-lfs.github.com/.\"\n \" Then run `git lfs install` (you only have to do this once).\"\n )\n )\n print(\"\")\n\n user, _ = self._api.whoami(token)\n namespace = (\n self.args.organization if self.args.organization is not None else user\n )\n\n repo_id = f\"{namespace}/{self.args.name}\"\n\n if self.args.type not in REPO_TYPES:\n print(\"Invalid repo --type\")\n exit(1)\n\n if self.args.type in REPO_TYPES_URL_PREFIXES:\n repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id\n\n print(\"You are about to create {}\".format(ANSI.bold(repo_id)))\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n try:\n url = self._api.create_repo(\n token,\n name=self.args.name,\n organization=self.args.organization,\n repo_type=self.args.type,\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"\\nYour repo now lives at:\")\n print(\" {}\".format(ANSI.bold(url)))\n print(\n \"\\nYou can clone it locally with the command below,\"\n \" and commit/push as usual.\"\n )\n print(f\"\\n git clone {url}\")\n print(\"\")\n", "path": "src/huggingface_hub/commands/user.py"}]} | 3,723 | 116 |
gh_patches_debug_3232 | rasdani/github-patches | git_diff | ibis-project__ibis-4167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat(sqlalchemy): allow non-primitive arrays
SQLAlchemy type inference fails on arrays where the value type is not a primitive or string prevent use cases like `array<array<int>>` from working.
</issue>
<code>
[start of ibis/backends/base/sql/alchemy/datatypes.py]
1 from __future__ import annotations
2
3 import functools
4 from typing import Iterable
5
6 import sqlalchemy as sa
7 from sqlalchemy.dialects import mysql, postgresql, sqlite
8 from sqlalchemy.dialects.mysql.base import MySQLDialect
9 from sqlalchemy.dialects.postgresql.base import PGDialect
10 from sqlalchemy.dialects.sqlite.base import SQLiteDialect
11 from sqlalchemy.engine.interfaces import Dialect
12 from sqlalchemy.types import UserDefinedType
13
14 import ibis.expr.datatypes as dt
15 import ibis.expr.schema as sch
16 from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
17
18 if geospatial_supported:
19 import geoalchemy2 as ga
20
21
22 class StructType(UserDefinedType):
23 def __init__(
24 self,
25 pairs: Iterable[tuple[str, sa.types.TypeEngine]],
26 ):
27 self.pairs = [
28 (name, sa.types.to_instance(type)) for name, type in pairs
29 ]
30
31 def get_col_spec(self, **_):
32 pairs = ", ".join(f"{k} {v}" for k, v in self.pairs)
33 return f"STRUCT({pairs})"
34
35
36 def table_from_schema(name, meta, schema, database: str | None = None):
37 # Convert Ibis schema to SQLA table
38 columns = []
39
40 for colname, dtype in zip(schema.names, schema.types):
41 satype = to_sqla_type(dtype)
42 column = sa.Column(colname, satype, nullable=dtype.nullable)
43 columns.append(column)
44
45 return sa.Table(name, meta, schema=database, *columns)
46
47
48 # TODO(cleanup)
49 ibis_type_to_sqla = {
50 dt.Null: sa.types.NullType,
51 dt.Date: sa.Date,
52 dt.Time: sa.Time,
53 dt.Boolean: sa.Boolean,
54 dt.Binary: sa.LargeBinary,
55 dt.String: sa.Text,
56 dt.Decimal: sa.NUMERIC,
57 # Mantissa-based
58 dt.Float16: sa.REAL,
59 dt.Float32: sa.REAL,
60 dt.Float64: sa.FLOAT,
61 dt.Int8: sa.SmallInteger,
62 dt.Int16: sa.SmallInteger,
63 dt.Int32: sa.Integer,
64 dt.Int64: sa.BigInteger,
65 dt.JSON: sa.JSON,
66 }
67
68
69 @functools.singledispatch
70 def to_sqla_type(itype, type_map=None):
71 if type_map is None:
72 type_map = ibis_type_to_sqla
73 return type_map[type(itype)]
74
75
76 @to_sqla_type.register(dt.Decimal)
77 def _(itype, **kwargs):
78 return sa.types.NUMERIC(itype.precision, itype.scale)
79
80
81 @to_sqla_type.register(dt.Interval)
82 def _(itype, **kwargs):
83 return sa.types.Interval()
84
85
86 @to_sqla_type.register(dt.Date)
87 def _(itype, **kwargs):
88 return sa.Date()
89
90
91 @to_sqla_type.register(dt.Timestamp)
92 def _(itype, **kwargs):
93 return sa.TIMESTAMP(bool(itype.timezone))
94
95
96 @to_sqla_type.register(dt.Array)
97 def _(itype, **kwargs):
98 ibis_type = itype.value_type
99 if not isinstance(ibis_type, (dt.Primitive, dt.String)):
100 raise TypeError(f'Type {ibis_type} is not a primitive or string type')
101 return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))
102
103
104 @to_sqla_type.register(dt.Struct)
105 def _(itype, **_):
106 return StructType(
107 [(name, to_sqla_type(type)) for name, type in itype.pairs.items()]
108 )
109
110
111 @to_sqla_type.register(dt.GeoSpatial)
112 def _(itype, **kwargs):
113 if itype.geotype == 'geometry':
114 return ga.Geometry
115 elif itype.geotype == 'geography':
116 return ga.Geography
117 else:
118 return ga.types._GISType
119
120
121 @dt.dtype.register(Dialect, sa.types.NullType)
122 def sa_null(_, satype, nullable=True):
123 return dt.null
124
125
126 @dt.dtype.register(Dialect, sa.types.Boolean)
127 def sa_boolean(_, satype, nullable=True):
128 return dt.Boolean(nullable=nullable)
129
130
131 @dt.dtype.register(MySQLDialect, mysql.NUMERIC)
132 def sa_mysql_numeric(_, satype, nullable=True):
133 # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html
134 return dt.Decimal(
135 satype.precision or 10, satype.scale or 0, nullable=nullable
136 )
137
138
139 @dt.dtype.register(Dialect, sa.types.Numeric)
140 @dt.dtype.register(SQLiteDialect, sqlite.NUMERIC)
141 def sa_numeric(_, satype, nullable=True):
142 return dt.Decimal(satype.precision, satype.scale, nullable=nullable)
143
144
145 @dt.dtype.register(Dialect, sa.types.SmallInteger)
146 def sa_smallint(_, satype, nullable=True):
147 return dt.Int16(nullable=nullable)
148
149
150 @dt.dtype.register(Dialect, sa.types.Integer)
151 def sa_integer(_, satype, nullable=True):
152 return dt.Int32(nullable=nullable)
153
154
155 @dt.dtype.register(Dialect, mysql.TINYINT)
156 def sa_mysql_tinyint(_, satype, nullable=True):
157 return dt.Int8(nullable=nullable)
158
159
160 @dt.dtype.register(Dialect, sa.types.BigInteger)
161 def sa_bigint(_, satype, nullable=True):
162 return dt.Int64(nullable=nullable)
163
164
165 @dt.dtype.register(Dialect, sa.REAL)
166 def sa_float(_, satype, nullable=True):
167 return dt.Float32(nullable=nullable)
168
169
170 @dt.dtype.register(Dialect, sa.FLOAT)
171 @dt.dtype.register(SQLiteDialect, sa.REAL)
172 @dt.dtype.register(PGDialect, postgresql.DOUBLE_PRECISION)
173 def sa_double(_, satype, nullable=True):
174 return dt.Float64(nullable=nullable)
175
176
177 @dt.dtype.register(PGDialect, postgresql.UUID)
178 def sa_uuid(_, satype, nullable=True):
179 return dt.UUID(nullable=nullable)
180
181
182 @dt.dtype.register(PGDialect, postgresql.MACADDR)
183 def sa_macaddr(_, satype, nullable=True):
184 return dt.MACADDR(nullable=nullable)
185
186
187 @dt.dtype.register(PGDialect, postgresql.INET)
188 def sa_inet(_, satype, nullable=True):
189 return dt.INET(nullable=nullable)
190
191
192 @dt.dtype.register(PGDialect, postgresql.JSON)
193 def sa_json(_, satype, nullable=True):
194 return dt.JSON(nullable=nullable)
195
196
197 @dt.dtype.register(PGDialect, postgresql.JSONB)
198 def sa_jsonb(_, satype, nullable=True):
199 return dt.JSONB(nullable=nullable)
200
201
202 if geospatial_supported:
203
204 @dt.dtype.register(Dialect, (ga.Geometry, ga.types._GISType))
205 def ga_geometry(_, gatype, nullable=True):
206 t = gatype.geometry_type
207 if t == 'POINT':
208 return dt.Point(nullable=nullable)
209 if t == 'LINESTRING':
210 return dt.LineString(nullable=nullable)
211 if t == 'POLYGON':
212 return dt.Polygon(nullable=nullable)
213 if t == 'MULTILINESTRING':
214 return dt.MultiLineString(nullable=nullable)
215 if t == 'MULTIPOINT':
216 return dt.MultiPoint(nullable=nullable)
217 if t == 'MULTIPOLYGON':
218 return dt.MultiPolygon(nullable=nullable)
219 if t == 'GEOMETRY':
220 return dt.Geometry(nullable=nullable)
221 else:
222 raise ValueError(f"Unrecognized geometry type: {t}")
223
224
225 POSTGRES_FIELD_TO_IBIS_UNIT = {
226 "YEAR": "Y",
227 "MONTH": "M",
228 "DAY": "D",
229 "HOUR": "h",
230 "MINUTE": "m",
231 "SECOND": "s",
232 "YEAR TO MONTH": "M",
233 "DAY TO HOUR": "h",
234 "DAY TO MINUTE": "m",
235 "DAY TO SECOND": "s",
236 "HOUR TO MINUTE": "m",
237 "HOUR TO SECOND": "s",
238 "MINUTE TO SECOND": "s",
239 }
240
241
242 @dt.dtype.register(PGDialect, postgresql.INTERVAL)
243 def sa_postgres_interval(_, satype, nullable=True):
244 field = satype.fields.upper()
245 unit = POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)
246 if unit is None:
247 raise ValueError(f"Unknown PostgreSQL interval field {field!r}")
248 elif unit in {"Y", "M"}:
249 raise ValueError(
250 "Variable length timedeltas are not yet supported with PostgreSQL"
251 )
252 return dt.Interval(unit=unit, nullable=nullable)
253
254
255 @dt.dtype.register(MySQLDialect, mysql.DOUBLE)
256 def sa_mysql_double(_, satype, nullable=True):
257 # TODO: handle asdecimal=True
258 return dt.Float64(nullable=nullable)
259
260
261 @dt.dtype.register(Dialect, sa.types.String)
262 def sa_string(_, satype, nullable=True):
263 return dt.String(nullable=nullable)
264
265
266 @dt.dtype.register(Dialect, sa.LargeBinary)
267 def sa_binary(_, satype, nullable=True):
268 return dt.Binary(nullable=nullable)
269
270
271 @dt.dtype.register(Dialect, sa.Time)
272 def sa_time(_, satype, nullable=True):
273 return dt.Time(nullable=nullable)
274
275
276 @dt.dtype.register(Dialect, sa.Date)
277 def sa_date(_, satype, nullable=True):
278 return dt.Date(nullable=nullable)
279
280
281 @dt.dtype.register(Dialect, sa.DateTime)
282 def sa_datetime(_, satype, nullable=True, default_timezone='UTC'):
283 timezone = default_timezone if satype.timezone else None
284 return dt.Timestamp(timezone=timezone, nullable=nullable)
285
286
287 @dt.dtype.register(Dialect, sa.ARRAY)
288 def sa_array(dialect, satype, nullable=True):
289 dimensions = satype.dimensions
290 if dimensions is not None and dimensions != 1:
291 raise NotImplementedError('Nested array types not yet supported')
292
293 value_dtype = dt.dtype(dialect, satype.item_type)
294 return dt.Array(value_dtype, nullable=nullable)
295
296
297 @dt.dtype.register(Dialect, StructType)
298 def sa_struct(dialect, satype, nullable=True):
299 pairs = [(name, dt.dtype(dialect, typ)) for name, typ in satype.pairs]
300 return dt.Struct.from_tuples(pairs, nullable=nullable)
301
302
303 @sch.infer.register((sa.Table, sa.sql.TableClause))
304 def schema_from_table(table, schema=None):
305 """Retrieve an ibis schema from a SQLAlchemy ``Table``.
306
307 Parameters
308 ----------
309 table : sa.Table
310
311 Returns
312 -------
313 schema : ibis.expr.datatypes.Schema
314 An ibis schema corresponding to the types of the columns in `table`.
315 """
316 schema = schema if schema is not None else {}
317 pairs = []
318 for name, column in zip(table.columns.keys(), table.columns):
319 if name in schema:
320 dtype = dt.dtype(schema[name])
321 else:
322 dtype = dt.dtype(
323 getattr(table.bind, 'dialect', Dialect()),
324 column.type,
325 nullable=column.nullable,
326 )
327 pairs.append((name, dtype))
328 return sch.schema(pairs)
329
[end of ibis/backends/base/sql/alchemy/datatypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/base/sql/alchemy/datatypes.py b/ibis/backends/base/sql/alchemy/datatypes.py
--- a/ibis/backends/base/sql/alchemy/datatypes.py
+++ b/ibis/backends/base/sql/alchemy/datatypes.py
@@ -96,8 +96,6 @@
@to_sqla_type.register(dt.Array)
def _(itype, **kwargs):
ibis_type = itype.value_type
- if not isinstance(ibis_type, (dt.Primitive, dt.String)):
- raise TypeError(f'Type {ibis_type} is not a primitive or string type')
return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))
| {"golden_diff": "diff --git a/ibis/backends/base/sql/alchemy/datatypes.py b/ibis/backends/base/sql/alchemy/datatypes.py\n--- a/ibis/backends/base/sql/alchemy/datatypes.py\n+++ b/ibis/backends/base/sql/alchemy/datatypes.py\n@@ -96,8 +96,6 @@\n @to_sqla_type.register(dt.Array)\n def _(itype, **kwargs):\n ibis_type = itype.value_type\n- if not isinstance(ibis_type, (dt.Primitive, dt.String)):\n- raise TypeError(f'Type {ibis_type} is not a primitive or string type')\n return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))\n", "issue": "feat(sqlalchemy): allow non-primitive arrays\nSQLAlchemy type inference fails on arrays where the value type is not a primitive or string prevent use cases like `array<array<int>>` from working.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nfrom typing import Iterable\n\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql, postgresql, sqlite\nfrom sqlalchemy.dialects.mysql.base import MySQLDialect\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\nfrom sqlalchemy.dialects.sqlite.base import SQLiteDialect\nfrom sqlalchemy.engine.interfaces import Dialect\nfrom sqlalchemy.types import UserDefinedType\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.backends.base.sql.alchemy.geospatial import geospatial_supported\n\nif geospatial_supported:\n import geoalchemy2 as ga\n\n\nclass StructType(UserDefinedType):\n def __init__(\n self,\n pairs: Iterable[tuple[str, sa.types.TypeEngine]],\n ):\n self.pairs = [\n (name, sa.types.to_instance(type)) for name, type in pairs\n ]\n\n def get_col_spec(self, **_):\n pairs = \", \".join(f\"{k} {v}\" for k, v in self.pairs)\n return f\"STRUCT({pairs})\"\n\n\ndef table_from_schema(name, meta, schema, database: str | None = None):\n # Convert Ibis schema to SQLA table\n columns = []\n\n for colname, dtype in zip(schema.names, schema.types):\n satype = to_sqla_type(dtype)\n column = sa.Column(colname, satype, nullable=dtype.nullable)\n columns.append(column)\n\n return sa.Table(name, meta, schema=database, *columns)\n\n\n# TODO(cleanup)\nibis_type_to_sqla = {\n dt.Null: sa.types.NullType,\n dt.Date: sa.Date,\n dt.Time: sa.Time,\n dt.Boolean: sa.Boolean,\n dt.Binary: sa.LargeBinary,\n dt.String: sa.Text,\n dt.Decimal: sa.NUMERIC,\n # Mantissa-based\n dt.Float16: sa.REAL,\n dt.Float32: sa.REAL,\n dt.Float64: sa.FLOAT,\n dt.Int8: sa.SmallInteger,\n dt.Int16: sa.SmallInteger,\n dt.Int32: sa.Integer,\n dt.Int64: sa.BigInteger,\n dt.JSON: sa.JSON,\n}\n\n\[email protected]\ndef to_sqla_type(itype, type_map=None):\n if type_map is None:\n type_map = ibis_type_to_sqla\n return type_map[type(itype)]\n\n\n@to_sqla_type.register(dt.Decimal)\ndef _(itype, **kwargs):\n return sa.types.NUMERIC(itype.precision, itype.scale)\n\n\n@to_sqla_type.register(dt.Interval)\ndef _(itype, **kwargs):\n return sa.types.Interval()\n\n\n@to_sqla_type.register(dt.Date)\ndef _(itype, **kwargs):\n return sa.Date()\n\n\n@to_sqla_type.register(dt.Timestamp)\ndef _(itype, **kwargs):\n return sa.TIMESTAMP(bool(itype.timezone))\n\n\n@to_sqla_type.register(dt.Array)\ndef _(itype, **kwargs):\n ibis_type = itype.value_type\n if not isinstance(ibis_type, (dt.Primitive, dt.String)):\n raise TypeError(f'Type {ibis_type} is not a primitive or string type')\n return sa.ARRAY(to_sqla_type(ibis_type, **kwargs))\n\n\n@to_sqla_type.register(dt.Struct)\ndef _(itype, **_):\n return StructType(\n [(name, to_sqla_type(type)) for name, type in itype.pairs.items()]\n 
)\n\n\n@to_sqla_type.register(dt.GeoSpatial)\ndef _(itype, **kwargs):\n if itype.geotype == 'geometry':\n return ga.Geometry\n elif itype.geotype == 'geography':\n return ga.Geography\n else:\n return ga.types._GISType\n\n\[email protected](Dialect, sa.types.NullType)\ndef sa_null(_, satype, nullable=True):\n return dt.null\n\n\[email protected](Dialect, sa.types.Boolean)\ndef sa_boolean(_, satype, nullable=True):\n return dt.Boolean(nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.NUMERIC)\ndef sa_mysql_numeric(_, satype, nullable=True):\n # https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html\n return dt.Decimal(\n satype.precision or 10, satype.scale or 0, nullable=nullable\n )\n\n\[email protected](Dialect, sa.types.Numeric)\[email protected](SQLiteDialect, sqlite.NUMERIC)\ndef sa_numeric(_, satype, nullable=True):\n return dt.Decimal(satype.precision, satype.scale, nullable=nullable)\n\n\[email protected](Dialect, sa.types.SmallInteger)\ndef sa_smallint(_, satype, nullable=True):\n return dt.Int16(nullable=nullable)\n\n\[email protected](Dialect, sa.types.Integer)\ndef sa_integer(_, satype, nullable=True):\n return dt.Int32(nullable=nullable)\n\n\[email protected](Dialect, mysql.TINYINT)\ndef sa_mysql_tinyint(_, satype, nullable=True):\n return dt.Int8(nullable=nullable)\n\n\[email protected](Dialect, sa.types.BigInteger)\ndef sa_bigint(_, satype, nullable=True):\n return dt.Int64(nullable=nullable)\n\n\[email protected](Dialect, sa.REAL)\ndef sa_float(_, satype, nullable=True):\n return dt.Float32(nullable=nullable)\n\n\[email protected](Dialect, sa.FLOAT)\[email protected](SQLiteDialect, sa.REAL)\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSON)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_jsonb(_, satype, nullable=True):\n return dt.JSONB(nullable=nullable)\n\n\nif geospatial_supported:\n\n @dt.dtype.register(Dialect, (ga.Geometry, ga.types._GISType))\n def ga_geometry(_, gatype, nullable=True):\n t = gatype.geometry_type\n if t == 'POINT':\n return dt.Point(nullable=nullable)\n if t == 'LINESTRING':\n return dt.LineString(nullable=nullable)\n if t == 'POLYGON':\n return dt.Polygon(nullable=nullable)\n if t == 'MULTILINESTRING':\n return dt.MultiLineString(nullable=nullable)\n if t == 'MULTIPOINT':\n return dt.MultiPoint(nullable=nullable)\n if t == 'MULTIPOLYGON':\n return dt.MultiPolygon(nullable=nullable)\n if t == 'GEOMETRY':\n return dt.Geometry(nullable=nullable)\n else:\n raise ValueError(f\"Unrecognized geometry type: {t}\")\n\n\nPOSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef 
sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n unit = POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)\n if unit is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length timedeltas are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](MySQLDialect, mysql.DOUBLE)\ndef sa_mysql_double(_, satype, nullable=True):\n # TODO: handle asdecimal=True\n return dt.Float64(nullable=nullable)\n\n\[email protected](Dialect, sa.types.String)\ndef sa_string(_, satype, nullable=True):\n return dt.String(nullable=nullable)\n\n\[email protected](Dialect, sa.LargeBinary)\ndef sa_binary(_, satype, nullable=True):\n return dt.Binary(nullable=nullable)\n\n\[email protected](Dialect, sa.Time)\ndef sa_time(_, satype, nullable=True):\n return dt.Time(nullable=nullable)\n\n\[email protected](Dialect, sa.Date)\ndef sa_date(_, satype, nullable=True):\n return dt.Date(nullable=nullable)\n\n\[email protected](Dialect, sa.DateTime)\ndef sa_datetime(_, satype, nullable=True, default_timezone='UTC'):\n timezone = default_timezone if satype.timezone else None\n return dt.Timestamp(timezone=timezone, nullable=nullable)\n\n\[email protected](Dialect, sa.ARRAY)\ndef sa_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError('Nested array types not yet supported')\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n\n\[email protected](Dialect, StructType)\ndef sa_struct(dialect, satype, nullable=True):\n pairs = [(name, dt.dtype(dialect, typ)) for name, typ in satype.pairs]\n return dt.Struct.from_tuples(pairs, nullable=nullable)\n\n\[email protected]((sa.Table, sa.sql.TableClause))\ndef schema_from_table(table, schema=None):\n \"\"\"Retrieve an ibis schema from a SQLAlchemy ``Table``.\n\n Parameters\n ----------\n table : sa.Table\n\n Returns\n -------\n schema : ibis.expr.datatypes.Schema\n An ibis schema corresponding to the types of the columns in `table`.\n \"\"\"\n schema = schema if schema is not None else {}\n pairs = []\n for name, column in zip(table.columns.keys(), table.columns):\n if name in schema:\n dtype = dt.dtype(schema[name])\n else:\n dtype = dt.dtype(\n getattr(table.bind, 'dialect', Dialect()),\n column.type,\n nullable=column.nullable,\n )\n pairs.append((name, dtype))\n return sch.schema(pairs)\n", "path": "ibis/backends/base/sql/alchemy/datatypes.py"}]} | 3,852 | 152 |
gh_patches_debug_17045 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-899 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The sdr metric in TM sometimes gives NaN for some input
## 🐛 Bug
This issue is related to `fast-bss-eval`'s torch version; see https://github.com/fakufaku/fast_bss_eval/issues/5
### To Reproduce
```
import numpy as np
import torch
x = np.load('debug.npz')
preds = torch.tensor(x['preds'])
target = torch.tensor(x['target'])
print(preds.shape, target.shape)
from torchmetrics.functional.audio import signal_distortion_ratio
sdr = signal_distortion_ratio(preds, target)
print(sdr)
from mir_eval.separation import bss_eval_sources
sdr, _, _, _ = bss_eval_sources(target.numpy(), preds.numpy(), False)
print(sdr)
```
outputs:
```
torch.Size([2, 64000]) torch.Size([2, 64000])
tensor([-2.6815, nan])
[-2.68156071 44.58523729]
```
unzip [data.zip](https://github.com/PyTorchLightning/metrics/files/8309638/data.zip) to get the `debug.npz`
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
the results given by signal_distortion_ratio are close to the ones given by `mir_eval`
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- OS (e.g., Linux):
- Python & PyTorch Version (e.g., 1.0):
- How you installed PyTorch (`conda`, `pip`, build command if you used source):
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of torchmetrics/functional/audio/sdr.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional
16
17 import torch
18
19 from torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE, _TORCH_GREATER_EQUAL_1_8
20
21 if _FAST_BSS_EVAL_AVAILABLE:
22 if _TORCH_GREATER_EQUAL_1_8:
23 from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient
24 from fast_bss_eval.torch.helpers import _normalize
25 from fast_bss_eval.torch.linalg import toeplitz
26 from fast_bss_eval.torch.metrics import compute_stats
27
28 solve = torch.linalg.solve
29 else:
30 import numpy
31 from fast_bss_eval.numpy.cgd import toeplitz_conjugate_gradient
32 from fast_bss_eval.numpy.helpers import _normalize
33 from fast_bss_eval.numpy.linalg import toeplitz
34 from fast_bss_eval.numpy.metrics import compute_stats
35
36 solve = numpy.linalg.solve
37 else:
38 toeplitz = None
39 toeplitz_conjugate_gradient = None
40 compute_stats = None
41 _normalize = None
42 __doctest_skip__ = ["signal_distortion_ratio"]
43
44 from torch import Tensor
45
46 from torchmetrics.utilities import rank_zero_warn
47 from torchmetrics.utilities.checks import _check_same_shape
48
49
50 def signal_distortion_ratio(
51 preds: Tensor,
52 target: Tensor,
53 use_cg_iter: Optional[int] = None,
54 filter_length: int = 512,
55 zero_mean: bool = False,
56 load_diag: Optional[float] = None,
57 ) -> Tensor:
58 r"""Signal to Distortion Ratio (SDR) [1,2,3]
59
60 Args:
61 preds:
62 shape ``[..., time]``
63 target:
64 shape ``[..., time]``
65 use_cg_iter:
66 If provided, an iterative method is used to solve for the distortion
67 filter coefficients instead of direct Gaussian elimination.
68 This can speed up the computation of the metrics in case the filters
69 are long. Using a value of 10 here has been shown to provide
70 good accuracy in most cases and is sufficient when using this
71 loss to train neural separation networks.
72 filter_length:
73 The length of the distortion filter allowed
74 zero_mean:
75 When set to True, the mean of all signals is subtracted prior to computation of the metrics
76 load_diag:
77 If provided, this small value is added to the diagonal coefficients of
78 the system metrics when solving for the filter coefficients.
79 This can help stabilize the metric in the case where some of the reference
80 signals may sometimes be zero
81
82 Raises:
83 ModuleNotFoundError:
84 If ``fast-bss-eval`` package is not installed
85
86 Returns:
87 sdr value of shape ``[...]``
88
89 Example:
90
91 >>> from torchmetrics.functional.audio import signal_distortion_ratio
92 >>> import torch
93 >>> g = torch.manual_seed(1)
94 >>> preds = torch.randn(8000)
95 >>> target = torch.randn(8000)
96 >>> signal_distortion_ratio(preds, target)
97 tensor(-12.0589)
98 >>> # use with permutation_invariant_training
99 >>> from torchmetrics.functional.audio import permutation_invariant_training
100 >>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]
101 >>> target = torch.randn(4, 2, 8000)
102 >>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio, 'max')
103 >>> best_metric
104 tensor([-11.6375, -11.4358, -11.7148, -11.6325])
105 >>> best_perm
106 tensor([[1, 0],
107 [0, 1],
108 [1, 0],
109 [0, 1]])
110
111 .. note::
112 1. when pytorch<1.8.0, numpy will be used to calculate this metric, which causes ``sdr`` to be
113 non-differentiable and slower to calculate
114
115 2. using this metrics requires you to have ``fast-bss-eval`` install. Either install as ``pip install
116 torchmetrics[audio]`` or ``pip install fast-bss-eval``
117
118 3. preds and target need to have the same dtype, otherwise target will be converted to preds' dtype
119
120
121 References:
122 [1] Vincent, E., Gribonval, R., & Fevotte, C. (2006). Performance measurement in blind audio source separation.
123 IEEE Transactions on Audio, Speech and Language Processing, 14(4), 1462–1469.
124
125 [2] Scheibler, R. (2021). SDR -- Medium Rare with Fast Computations.
126
127 [3] https://github.com/fakufaku/fast_bss_eval
128 """
129 if not _FAST_BSS_EVAL_AVAILABLE:
130 raise ModuleNotFoundError(
131 "SDR metric requires that `fast-bss-eval` is installed."
132 " Either install as `pip install torchmetrics[audio]` or `pip install fast-bss-eval`."
133 )
134 _check_same_shape(preds, target)
135
136 if not preds.dtype.is_floating_point:
137 preds = preds.float() # for torch.norm
138
139 # half precision support
140 if preds.dtype == torch.float16:
141 preds = preds.to(torch.float32)
142
143 if preds.dtype != target.dtype: # for torch.linalg.solve
144 target = target.to(preds.dtype)
145
146 if zero_mean:
147 preds = preds - preds.mean(dim=-1, keepdim=True)
148 target = target - target.mean(dim=-1, keepdim=True)
149
150 # normalize along time-axis
151 if not _TORCH_GREATER_EQUAL_1_8:
152 # use numpy if torch<1.8
153 rank_zero_warn(
154 "Pytorch is under 1.8, thus SDR numpy version is used."
155 "For better performance and differentiability, you should change to Pytorch v1.8 or above."
156 )
157 device = preds.device
158 preds = preds.detach().cpu().numpy()
159 target = target.detach().cpu().numpy()
160
161 preds = _normalize(preds, axis=-1)
162 target = _normalize(target, axis=-1)
163 else:
164 preds = _normalize(preds, dim=-1)
165 target = _normalize(target, dim=-1)
166
167 # solve for the optimal filter
168 # compute auto-correlation and cross-correlation
169 acf, xcorr = compute_stats(target, preds, length=filter_length, pairwise=False)
170
171 if load_diag is not None:
172 # the diagonal factor of the Toeplitz matrix is the first
173 # coefficient of the acf
174 acf[..., 0] += load_diag
175
176 if use_cg_iter is not None:
177 # use preconditioned conjugate gradient
178 sol = toeplitz_conjugate_gradient(acf, xcorr, n_iter=use_cg_iter)
179 else:
180 # regular matrix solver
181 r_mat = toeplitz(acf)
182 sol = solve(r_mat, xcorr)
183
184 # to tensor if torch<1.8
185 if not _TORCH_GREATER_EQUAL_1_8:
186 sol = torch.tensor(sol, device=device)
187 xcorr = torch.tensor(xcorr, device=device)
188
189 # compute the coherence
190 coh = torch.einsum("...l,...l->...", xcorr, sol)
191
192 # transform to decibels
193 ratio = coh / (1 - coh)
194 val = 10.0 * torch.log10(ratio)
195 return val
196
197
198 def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
199 """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general
200 considered an overall measure of how good a source sound.
201
202 Args:
203 preds:
204 shape ``[...,time]``
205 target:
206 shape ``[...,time]``
207 zero_mean:
208 If to zero mean target and preds or not
209
210 Returns:
211 si-sdr value of shape [...]
212
213 Example:
214 >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
215 >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
216 >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
217 >>> scale_invariant_signal_distortion_ratio(preds, target)
218 tensor(18.4030)
219
220 References:
221 [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
222 and Signal Processing (ICASSP) 2019.
223 """
224 _check_same_shape(preds, target)
225 eps = torch.finfo(preds.dtype).eps
226
227 if zero_mean:
228 target = target - torch.mean(target, dim=-1, keepdim=True)
229 preds = preds - torch.mean(preds, dim=-1, keepdim=True)
230
231 alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (
232 torch.sum(target ** 2, dim=-1, keepdim=True) + eps
233 )
234 target_scaled = alpha * target
235
236 noise = target_scaled - preds
237
238 val = (torch.sum(target_scaled ** 2, dim=-1) + eps) / (torch.sum(noise ** 2, dim=-1) + eps)
239 val = 10 * torch.log10(val)
240
241 return val
242
[end of torchmetrics/functional/audio/sdr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchmetrics/functional/audio/sdr.py b/torchmetrics/functional/audio/sdr.py
--- a/torchmetrics/functional/audio/sdr.py
+++ b/torchmetrics/functional/audio/sdr.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import warnings
from typing import Optional
import torch
@@ -192,6 +193,15 @@
# transform to decibels
ratio = coh / (1 - coh)
val = 10.0 * torch.log10(ratio)
+
+ # recompute sdr in float64 if val is NaN or Inf
+ if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64:
+ warnings.warn(
+ "Detected `nan` or `inf` value in computed metric, retrying computation in double precision",
+ UserWarning,
+ )
+ val = signal_distortion_ratio(preds.double(), target.double(), use_cg_iter, filter_length, zero_mean, load_diag)
+
return val
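
The patch falls back to a double-precision recomputation whenever the single-precision solve produces `nan` or `inf`. On versions that do not yet include it, the same workaround can be applied at the call site; the sketch below mirrors that logic (the wrapper name is made up for illustration):

```python
import torch
from torchmetrics.functional.audio import signal_distortion_ratio

def sdr_with_float64_fallback(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Try the cheap single-precision computation first.
    val = signal_distortion_ratio(preds, target)
    if torch.isnan(val).any() or torch.isinf(val).any():
        # An ill-conditioned Toeplitz system can make the float32 solve blow up;
        # redoing the computation in float64 is what the patch does internally.
        val = signal_distortion_ratio(preds.double(), target.double())
    return val
```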
| {"golden_diff": "diff --git a/torchmetrics/functional/audio/sdr.py b/torchmetrics/functional/audio/sdr.py\n--- a/torchmetrics/functional/audio/sdr.py\n+++ b/torchmetrics/functional/audio/sdr.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import warnings\n from typing import Optional\n \n import torch\n@@ -192,6 +193,15 @@\n # transform to decibels\n ratio = coh / (1 - coh)\n val = 10.0 * torch.log10(ratio)\n+\n+ # recompute sdr in float64 if val is NaN or Inf\n+ if (torch.isnan(val).any() or torch.isinf(val).any()) and preds.dtype != torch.float64:\n+ warnings.warn(\n+ \"Detected `nan` or `inf` value in computed metric, retrying computation in double precision\",\n+ UserWarning,\n+ )\n+ val = signal_distortion_ratio(preds.double(), target.double(), use_cg_iter, filter_length, zero_mean, load_diag)\n+\n return val\n", "issue": "The sdr metric in TM sometime gives NaN for some input\n## \ud83d\udc1b Bug\r\n\r\nThis issue is related with `fast-bss-eval`'s torch version, see https://github.com/fakufaku/fast_bss_eval/issues/5\r\n\r\n### To Reproduce\r\n\r\n```\r\nimport numpy as np\r\nimport torch\r\n\r\nx = np.load('debug.npz')\r\npreds = torch.tensor(x['preds'])\r\ntarget = torch.tensor(x['target'])\r\nprint(preds.shape, target.shape)\r\n\r\nfrom torchmetrics.functional.audio import signal_distortion_ratio\r\nsdr = signal_distortion_ratio(preds, target)\r\nprint(sdr)\r\n\r\nfrom mir_eval.separation import bss_eval_sources\r\nsdr, _, _, _ = bss_eval_sources(target.numpy(), preds.numpy(), False)\r\nprint(sdr)\r\n```\r\n\r\noutputs:\r\n```\r\ntorch.Size([2, 64000]) torch.Size([2, 64000])\r\ntensor([-2.6815, nan])\r\n[-2.68156071 44.58523729]\r\n```\r\n\r\nunzip [data.zip](https://github.com/PyTorchLightning/metrics/files/8309638/data.zip) to get the `debug.npz`\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n#### Code sample\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\n\r\nthe results given by signal_distortion_ratio is close to the one given by `mir_eval`\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- OS (e.g., Linux):\r\n- Python & PyTorch Version (e.g., 1.0):\r\n- How you installed PyTorch (`conda`, `pip`, build command if you used source):\r\n- Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nimport torch\n\nfrom torchmetrics.utilities.imports import _FAST_BSS_EVAL_AVAILABLE, _TORCH_GREATER_EQUAL_1_8\n\nif _FAST_BSS_EVAL_AVAILABLE:\n if _TORCH_GREATER_EQUAL_1_8:\n from fast_bss_eval.torch.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.torch.helpers import _normalize\n from fast_bss_eval.torch.linalg import toeplitz\n from fast_bss_eval.torch.metrics import compute_stats\n\n solve = torch.linalg.solve\n else:\n import numpy\n from fast_bss_eval.numpy.cgd import toeplitz_conjugate_gradient\n from fast_bss_eval.numpy.helpers import _normalize\n from fast_bss_eval.numpy.linalg import toeplitz\n from fast_bss_eval.numpy.metrics import compute_stats\n\n solve = numpy.linalg.solve\nelse:\n toeplitz = None\n toeplitz_conjugate_gradient = None\n compute_stats = None\n _normalize = None\n __doctest_skip__ = [\"signal_distortion_ratio\"]\n\nfrom torch import Tensor\n\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef signal_distortion_ratio(\n preds: Tensor,\n target: Tensor,\n use_cg_iter: Optional[int] = None,\n filter_length: int = 512,\n zero_mean: bool = False,\n load_diag: Optional[float] = None,\n) -> Tensor:\n r\"\"\"Signal to Distortion Ratio (SDR) [1,2,3]\n\n Args:\n preds:\n shape ``[..., time]``\n target:\n shape ``[..., time]``\n use_cg_iter:\n If provided, an iterative method is used to solve for the distortion\n filter coefficients instead of direct Gaussian elimination.\n This can speed up the computation of the metrics in case the filters\n are long. 
Using a value of 10 here has been shown to provide\n good accuracy in most cases and is sufficient when using this\n loss to train neural separation networks.\n filter_length:\n The length of the distortion filter allowed\n zero_mean:\n When set to True, the mean of all signals is subtracted prior to computation of the metrics\n load_diag:\n If provided, this small value is added to the diagonal coefficients of\n the system metrics when solving for the filter coefficients.\n This can help stabilize the metric in the case where some of the reference\n signals may sometimes be zero\n\n Raises:\n ModuleNotFoundError:\n If ``fast-bss-eval`` package is not installed\n\n Returns:\n sdr value of shape ``[...]``\n\n Example:\n\n >>> from torchmetrics.functional.audio import signal_distortion_ratio\n >>> import torch\n >>> g = torch.manual_seed(1)\n >>> preds = torch.randn(8000)\n >>> target = torch.randn(8000)\n >>> signal_distortion_ratio(preds, target)\n tensor(-12.0589)\n >>> # use with permutation_invariant_training\n >>> from torchmetrics.functional.audio import permutation_invariant_training\n >>> preds = torch.randn(4, 2, 8000) # [batch, spk, time]\n >>> target = torch.randn(4, 2, 8000)\n >>> best_metric, best_perm = permutation_invariant_training(preds, target, signal_distortion_ratio, 'max')\n >>> best_metric\n tensor([-11.6375, -11.4358, -11.7148, -11.6325])\n >>> best_perm\n tensor([[1, 0],\n [0, 1],\n [1, 0],\n [0, 1]])\n\n .. note::\n 1. when pytorch<1.8.0, numpy will be used to calculate this metric, which causes ``sdr`` to be\n non-differentiable and slower to calculate\n\n 2. using this metrics requires you to have ``fast-bss-eval`` install. Either install as ``pip install\n torchmetrics[audio]`` or ``pip install fast-bss-eval``\n\n 3. preds and target need to have the same dtype, otherwise target will be converted to preds' dtype\n\n\n References:\n [1] Vincent, E., Gribonval, R., & Fevotte, C. (2006). Performance measurement in blind audio source separation.\n IEEE Transactions on Audio, Speech and Language Processing, 14(4), 1462\u20131469.\n\n [2] Scheibler, R. (2021). 
SDR -- Medium Rare with Fast Computations.\n\n [3] https://github.com/fakufaku/fast_bss_eval\n \"\"\"\n if not _FAST_BSS_EVAL_AVAILABLE:\n raise ModuleNotFoundError(\n \"SDR metric requires that `fast-bss-eval` is installed.\"\n \" Either install as `pip install torchmetrics[audio]` or `pip install fast-bss-eval`.\"\n )\n _check_same_shape(preds, target)\n\n if not preds.dtype.is_floating_point:\n preds = preds.float() # for torch.norm\n\n # half precision support\n if preds.dtype == torch.float16:\n preds = preds.to(torch.float32)\n\n if preds.dtype != target.dtype: # for torch.linalg.solve\n target = target.to(preds.dtype)\n\n if zero_mean:\n preds = preds - preds.mean(dim=-1, keepdim=True)\n target = target - target.mean(dim=-1, keepdim=True)\n\n # normalize along time-axis\n if not _TORCH_GREATER_EQUAL_1_8:\n # use numpy if torch<1.8\n rank_zero_warn(\n \"Pytorch is under 1.8, thus SDR numpy version is used.\"\n \"For better performance and differentiability, you should change to Pytorch v1.8 or above.\"\n )\n device = preds.device\n preds = preds.detach().cpu().numpy()\n target = target.detach().cpu().numpy()\n\n preds = _normalize(preds, axis=-1)\n target = _normalize(target, axis=-1)\n else:\n preds = _normalize(preds, dim=-1)\n target = _normalize(target, dim=-1)\n\n # solve for the optimal filter\n # compute auto-correlation and cross-correlation\n acf, xcorr = compute_stats(target, preds, length=filter_length, pairwise=False)\n\n if load_diag is not None:\n # the diagonal factor of the Toeplitz matrix is the first\n # coefficient of the acf\n acf[..., 0] += load_diag\n\n if use_cg_iter is not None:\n # use preconditioned conjugate gradient\n sol = toeplitz_conjugate_gradient(acf, xcorr, n_iter=use_cg_iter)\n else:\n # regular matrix solver\n r_mat = toeplitz(acf)\n sol = solve(r_mat, xcorr)\n\n # to tensor if torch<1.8\n if not _TORCH_GREATER_EQUAL_1_8:\n sol = torch.tensor(sol, device=device)\n xcorr = torch.tensor(xcorr, device=device)\n\n # compute the coherence\n coh = torch.einsum(\"...l,...l->...\", xcorr, sol)\n\n # transform to decibels\n ratio = coh / (1 - coh)\n val = 10.0 * torch.log10(ratio)\n return val\n\n\ndef scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:\n \"\"\"Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general\n considered an overall measure of how good a source sound.\n\n Args:\n preds:\n shape ``[...,time]``\n target:\n shape ``[...,time]``\n zero_mean:\n If to zero mean target and preds or not\n\n Returns:\n si-sdr value of shape [...]\n\n Example:\n >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio\n >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])\n >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])\n >>> scale_invariant_signal_distortion_ratio(preds, target)\n tensor(18.4030)\n\n References:\n [1] Le Roux, Jonathan, et al. 
\"SDR half-baked or well done.\" IEEE International Conference on Acoustics, Speech\n and Signal Processing (ICASSP) 2019.\n \"\"\"\n _check_same_shape(preds, target)\n eps = torch.finfo(preds.dtype).eps\n\n if zero_mean:\n target = target - torch.mean(target, dim=-1, keepdim=True)\n preds = preds - torch.mean(preds, dim=-1, keepdim=True)\n\n alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (\n torch.sum(target ** 2, dim=-1, keepdim=True) + eps\n )\n target_scaled = alpha * target\n\n noise = target_scaled - preds\n\n val = (torch.sum(target_scaled ** 2, dim=-1) + eps) / (torch.sum(noise ** 2, dim=-1) + eps)\n val = 10 * torch.log10(val)\n\n return val\n", "path": "torchmetrics/functional/audio/sdr.py"}]} | 3,845 | 256 |
gh_patches_debug_4281 | rasdani/github-patches | git_diff | ocf__ocfweb-808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
disk quota on Account commands page errors
When trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/), this error appears:
`quota: Bad output format units for human readable output: vQ`
</issue>
<code>
[start of ocfweb/account/commands.py]
1 from django import forms
2 from django.forms import widgets
3 from django.http import HttpRequest
4 from django.http import HttpResponse
5 from django.shortcuts import render
6 from paramiko import AuthenticationException
7 from paramiko import SSHClient
8 from paramiko.hostkeys import HostKeyEntry
9
10 from ocfweb.component.forms import Form
11
12
13 def commands(request: HttpRequest) -> HttpResponse:
14 command_to_run = ''
15 output = ''
16 error = ''
17 if request.method == 'POST':
18 form = CommandForm(request.POST)
19 if form.is_valid():
20 username = form.cleaned_data['username']
21 password = form.cleaned_data['password']
22
23 command_to_run = form.cleaned_data['command_to_run']
24
25 ssh = SSHClient()
26
27 host_keys = ssh.get_host_keys()
28 entry = HostKeyEntry.from_line(
29 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa
30 )
31 assert entry is not None # should never be none as we are passing a static string above
32 host_keys.add(
33 'ssh.ocf.berkeley.edu',
34 'ssh-rsa',
35 entry.key,
36 )
37
38 try:
39 ssh.connect(
40 'ssh.ocf.berkeley.edu',
41 username=username,
42 password=password,
43 )
44 except AuthenticationException:
45 error = 'Authentication failed. Did you type the wrong username or password?'
46
47 if not error:
48 _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)
49 output = ssh_stdout.read().decode()
50 error = ssh_stderr.read().decode()
51 else:
52 form = CommandForm()
53
54 return render(
55 request,
56 'account/commands/index.html', {
57 'title': 'Account commands',
58 'form': form,
59 'command': command_to_run,
60 'output': output,
61 'error': error,
62 },
63 )
64
65
66 class CommandForm(Form):
67 username = forms.CharField(
68 label='OCF username',
69 min_length=3,
70 max_length=16,
71 )
72 password = forms.CharField(
73 widget=forms.PasswordInput,
74 label='Password',
75 min_length=8,
76 max_length=256,
77 )
78
79 COMMAND_CHOICES = (
80 (
81 '/opt/share/utils/bin/paper',
82 'paper quota -- how many pages you have remaining this semester',
83 ),
84 (
85 '/usr/bin/quota -svQ',
86 'disk quota -- how much disk space you have used and how much you ' +
87 'have left',
88 ),
89 (
90 '/opt/share/utils/bin/makehttp',
91 'makehttp -- set up the web space for your OCF account',
92 ),
93 (
94 'echo yes | /opt/share/utils/bin/makemysql',
95 'makemysql -- reset your MySQL database password, or create a new ' +
96 'MySQL database (copy down the password somewhere secure)',
97 ),
98 )
99
100 command_to_run = forms.ChoiceField(
101 choices=COMMAND_CHOICES,
102 label='Command to run',
103 widget=widgets.RadioSelect,
104 )
105
[end of ocfweb/account/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py
--- a/ocfweb/account/commands.py
+++ b/ocfweb/account/commands.py
@@ -82,7 +82,7 @@
'paper quota -- how many pages you have remaining this semester',
),
(
- '/usr/bin/quota -svQ',
+ '/usr/bin/quota -vQs',
'disk quota -- how much disk space you have used and how much you ' +
'have left',
),
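
The reordering matters because `quota`'s `-s`/`--human-readable` switch can take an optional units argument, so in the bundled form `-svQ` the trailing `vQ` appears to be consumed as that argument — which is exactly the `Bad output format units for human readable output: vQ` message in the report. Writing the flags as `-vQs` leaves `-s` last with no argument, so the default units are used and the command succeeds.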
| {"golden_diff": "diff --git a/ocfweb/account/commands.py b/ocfweb/account/commands.py\n--- a/ocfweb/account/commands.py\n+++ b/ocfweb/account/commands.py\n@@ -82,7 +82,7 @@\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n- '/usr/bin/quota -svQ',\n+ '/usr/bin/quota -vQs',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n", "issue": "disk quota on Account commands page errors\nWhen trying to check disk quota usage on the commands page (https://www.ocf.berkeley.edu/account/commands/)\r\nthis error appears\r\n\r\n`quota: Bad output format units for human readable output: vQ`\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import widgets\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom paramiko import AuthenticationException\nfrom paramiko import SSHClient\nfrom paramiko.hostkeys import HostKeyEntry\n\nfrom ocfweb.component.forms import Form\n\n\ndef commands(request: HttpRequest) -> HttpResponse:\n command_to_run = ''\n output = ''\n error = ''\n if request.method == 'POST':\n form = CommandForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n command_to_run = form.cleaned_data['command_to_run']\n\n ssh = SSHClient()\n\n host_keys = ssh.get_host_keys()\n entry = HostKeyEntry.from_line(\n 'ssh.ocf.berkeley.edu ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqMkHVVoMl8md25iky7e2Xe3ARaC4H1PbIpv5Y+xT4KOT17gGvFSmfjGyW9P8ZTyqxq560iWdyELIn7efaGPbkUo9retcnT6WLmuh9nRIYwb6w7BGEEvlblBmH27Fkgt7JQ6+1sr5teuABfIMg22WTQAeDQe1jg0XsPu36OjbC7HjA3BXsiNBpxKDolYIXWzOD+r9FxZLP0lawh8dl//O5FW4ha1IbHklq2i9Mgl79wAH3jxf66kQJTvLmalKnQ0Dbp2+vYGGhIjVFXlGSzKsHAVhuVD6TBXZbxWOYoXanS7CC43MrEtBYYnc6zMn/k/rH0V+WeRhuzTnr/OZGJbBBw==', # noqa\n )\n assert entry is not None # should never be none as we are passing a static string above\n host_keys.add(\n 'ssh.ocf.berkeley.edu',\n 'ssh-rsa',\n entry.key,\n )\n\n try:\n ssh.connect(\n 'ssh.ocf.berkeley.edu',\n username=username,\n password=password,\n )\n except AuthenticationException:\n error = 'Authentication failed. 
Did you type the wrong username or password?'\n\n if not error:\n _, ssh_stdout, ssh_stderr = ssh.exec_command(command_to_run, get_pty=True)\n output = ssh_stdout.read().decode()\n error = ssh_stderr.read().decode()\n else:\n form = CommandForm()\n\n return render(\n request,\n 'account/commands/index.html', {\n 'title': 'Account commands',\n 'form': form,\n 'command': command_to_run,\n 'output': output,\n 'error': error,\n },\n )\n\n\nclass CommandForm(Form):\n username = forms.CharField(\n label='OCF username',\n min_length=3,\n max_length=16,\n )\n password = forms.CharField(\n widget=forms.PasswordInput,\n label='Password',\n min_length=8,\n max_length=256,\n )\n\n COMMAND_CHOICES = (\n (\n '/opt/share/utils/bin/paper',\n 'paper quota -- how many pages you have remaining this semester',\n ),\n (\n '/usr/bin/quota -svQ',\n 'disk quota -- how much disk space you have used and how much you ' +\n 'have left',\n ),\n (\n '/opt/share/utils/bin/makehttp',\n 'makehttp -- set up the web space for your OCF account',\n ),\n (\n 'echo yes | /opt/share/utils/bin/makemysql',\n 'makemysql -- reset your MySQL database password, or create a new ' +\n 'MySQL database (copy down the password somewhere secure)',\n ),\n )\n\n command_to_run = forms.ChoiceField(\n choices=COMMAND_CHOICES,\n label='Command to run',\n widget=widgets.RadioSelect,\n )\n", "path": "ocfweb/account/commands.py"}]} | 1,779 | 122 |
gh_patches_debug_15944 | rasdani/github-patches | git_diff | aio-libs__aiohttp-1326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support load/save cookie_jar
I found that cookie_jar does not provide any function to _load or save_ the cookie jar.
Is there any pythonic way to implement these two methods?
```
session.cookie_jar.load('foo.cookie.txt')
....# do sth.
session.cookie_jar.save()
```
</issue>
<code>
[start of aiohttp/cookiejar.py]
1 import datetime
2 import re
3 from collections import defaultdict
4 from collections.abc import Mapping
5 from http.cookies import Morsel, SimpleCookie
6 from math import ceil
7
8 from yarl import URL
9
10 from .abc import AbstractCookieJar
11 from .helpers import is_ip_address
12
13
14 class CookieJar(AbstractCookieJar):
15 """Implements cookie storage adhering to RFC 6265."""
16
17 DATE_TOKENS_RE = re.compile(
18 "[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*"
19 "(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)")
20
21 DATE_HMS_TIME_RE = re.compile("(\d{1,2}):(\d{1,2}):(\d{1,2})")
22
23 DATE_DAY_OF_MONTH_RE = re.compile("(\d{1,2})")
24
25 DATE_MONTH_RE = re.compile("(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|"
26 "(aug)|(sep)|(oct)|(nov)|(dec)", re.I)
27
28 DATE_YEAR_RE = re.compile("(\d{2,4})")
29
30 MAX_TIME = 2051215261.0 # so far in future (2035-01-01)
31
32 def __init__(self, *, unsafe=False, loop=None):
33 super().__init__(loop=loop)
34 self._cookies = defaultdict(SimpleCookie)
35 self._host_only_cookies = set()
36 self._unsafe = unsafe
37 self._next_expiration = ceil(self._loop.time())
38 self._expirations = {}
39
40 def clear(self):
41 self._cookies.clear()
42 self._host_only_cookies.clear()
43 self._next_expiration = ceil(self._loop.time())
44 self._expirations.clear()
45
46 def __iter__(self):
47 self._do_expiration()
48 for val in self._cookies.values():
49 yield from val.values()
50
51 def __len__(self):
52 return sum(1 for i in self)
53
54 def _do_expiration(self):
55 now = self._loop.time()
56 if self._next_expiration > now:
57 return
58 if not self._expirations:
59 return
60 next_expiration = self.MAX_TIME
61 to_del = []
62 cookies = self._cookies
63 expirations = self._expirations
64 for (domain, name), when in expirations.items():
65 if when < now:
66 cookies[domain].pop(name, None)
67 to_del.append((domain, name))
68 self._host_only_cookies.discard((domain, name))
69 else:
70 next_expiration = min(next_expiration, when)
71 for key in to_del:
72 del expirations[key]
73
74 self._next_expiration = ceil(next_expiration)
75
76 def _expire_cookie(self, when, domain, name):
77 self._next_expiration = min(self._next_expiration, when)
78 self._expirations[(domain, name)] = when
79
80 def update_cookies(self, cookies, response_url=URL()):
81 """Update cookies."""
82 hostname = response_url.host
83
84 if not self._unsafe and is_ip_address(hostname):
85 # Don't accept cookies from IPs
86 return
87
88 if isinstance(cookies, Mapping):
89 cookies = cookies.items()
90
91 for name, cookie in cookies:
92 if not isinstance(cookie, Morsel):
93 tmp = SimpleCookie()
94 tmp[name] = cookie
95 cookie = tmp[name]
96
97 domain = cookie["domain"]
98
99 # ignore domains with trailing dots
100 if domain.endswith('.'):
101 domain = ""
102 del cookie["domain"]
103
104 if not domain and hostname is not None:
105 # Set the cookie's domain to the response hostname
106 # and set its host-only-flag
107 self._host_only_cookies.add((hostname, name))
108 domain = cookie["domain"] = hostname
109
110 if domain.startswith("."):
111 # Remove leading dot
112 domain = domain[1:]
113 cookie["domain"] = domain
114
115 if hostname and not self._is_domain_match(domain, hostname):
116 # Setting cookies for different domains is not allowed
117 continue
118
119 path = cookie["path"]
120 if not path or not path.startswith("/"):
121 # Set the cookie's path to the response path
122 path = response_url.path
123 if not path.startswith("/"):
124 path = "/"
125 else:
126 # Cut everything from the last slash to the end
127 path = "/" + path[1:path.rfind("/")]
128 cookie["path"] = path
129
130 max_age = cookie["max-age"]
131 if max_age:
132 try:
133 delta_seconds = int(max_age)
134 self._expire_cookie(self._loop.time() + delta_seconds,
135 domain, name)
136 except ValueError:
137 cookie["max-age"] = ""
138
139 else:
140 expires = cookie["expires"]
141 if expires:
142 expire_time = self._parse_date(expires)
143 if expire_time:
144 self._expire_cookie(expire_time.timestamp(),
145 domain, name)
146 else:
147 cookie["expires"] = ""
148
149 # use dict method because SimpleCookie class modifies value
150 # before Python 3.4.3
151 dict.__setitem__(self._cookies[domain], name, cookie)
152
153 self._do_expiration()
154
155 def filter_cookies(self, request_url=URL()):
156 """Returns this jar's cookies filtered by their attributes."""
157 self._do_expiration()
158 filtered = SimpleCookie()
159 hostname = request_url.host or ""
160 is_not_secure = request_url.scheme not in ("https", "wss")
161
162 for cookie in self:
163 name = cookie.key
164 domain = cookie["domain"]
165
166 # Send shared cookies
167 if not domain:
168 filtered[name] = cookie.value
169 continue
170
171 if not self._unsafe and is_ip_address(hostname):
172 continue
173
174 if (domain, name) in self._host_only_cookies:
175 if domain != hostname:
176 continue
177 elif not self._is_domain_match(domain, hostname):
178 continue
179
180 if not self._is_path_match(request_url.path, cookie["path"]):
181 continue
182
183 if is_not_secure and cookie["secure"]:
184 continue
185
186 filtered[name] = cookie.value
187
188 return filtered
189
190 @staticmethod
191 def _is_domain_match(domain, hostname):
192 """Implements domain matching adhering to RFC 6265."""
193 if hostname == domain:
194 return True
195
196 if not hostname.endswith(domain):
197 return False
198
199 non_matching = hostname[:-len(domain)]
200
201 if not non_matching.endswith("."):
202 return False
203
204 return not is_ip_address(hostname)
205
206 @staticmethod
207 def _is_path_match(req_path, cookie_path):
208 """Implements path matching adhering to RFC 6265."""
209 if not req_path.startswith("/"):
210 req_path = "/"
211
212 if req_path == cookie_path:
213 return True
214
215 if not req_path.startswith(cookie_path):
216 return False
217
218 if cookie_path.endswith("/"):
219 return True
220
221 non_matching = req_path[len(cookie_path):]
222
223 return non_matching.startswith("/")
224
225 @classmethod
226 def _parse_date(cls, date_str):
227 """Implements date string parsing adhering to RFC 6265."""
228 if not date_str:
229 return
230
231 found_time = False
232 found_day = False
233 found_month = False
234 found_year = False
235
236 hour = minute = second = 0
237 day = 0
238 month = 0
239 year = 0
240
241 for token_match in cls.DATE_TOKENS_RE.finditer(date_str):
242
243 token = token_match.group("token")
244
245 if not found_time:
246 time_match = cls.DATE_HMS_TIME_RE.match(token)
247 if time_match:
248 found_time = True
249 hour, minute, second = [
250 int(s) for s in time_match.groups()]
251 continue
252
253 if not found_day:
254 day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)
255 if day_match:
256 found_day = True
257 day = int(day_match.group())
258 continue
259
260 if not found_month:
261 month_match = cls.DATE_MONTH_RE.match(token)
262 if month_match:
263 found_month = True
264 month = month_match.lastindex
265 continue
266
267 if not found_year:
268 year_match = cls.DATE_YEAR_RE.match(token)
269 if year_match:
270 found_year = True
271 year = int(year_match.group())
272
273 if 70 <= year <= 99:
274 year += 1900
275 elif 0 <= year <= 69:
276 year += 2000
277
278 if False in (found_day, found_month, found_year, found_time):
279 return
280
281 if not 1 <= day <= 31:
282 return
283
284 if year < 1601 or hour > 23 or minute > 59 or second > 59:
285 return
286
287 return datetime.datetime(year, month, day,
288 hour, minute, second,
289 tzinfo=datetime.timezone.utc)
290
[end of aiohttp/cookiejar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py
--- a/aiohttp/cookiejar.py
+++ b/aiohttp/cookiejar.py
@@ -1,4 +1,6 @@
import datetime
+import pathlib
+import pickle
import re
from collections import defaultdict
from collections.abc import Mapping
@@ -37,6 +39,16 @@
self._next_expiration = ceil(self._loop.time())
self._expirations = {}
+ def save(self, file_path):
+ file_path = pathlib.Path(file_path)
+ with file_path.open(mode='wb') as f:
+ pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)
+
+ def load(self, file_path):
+ file_path = pathlib.Path(file_path)
+ with file_path.open(mode='rb') as f:
+ self._cookies = pickle.load(f)
+
def clear(self):
self._cookies.clear()
self._host_only_cookies.clear()
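
With the two methods added above, the workflow asked for in the issue maps onto them directly. A minimal sketch follows (the file name is made up, and note the jar is serialized with `pickle`, so the file can only be read back through `load()`, not as a browser-style cookies.txt):

```python
import pathlib
import aiohttp

COOKIE_FILE = 'foo.cookies.pickle'   # hypothetical path

jar = aiohttp.CookieJar()
if pathlib.Path(COOKIE_FILE).exists():
    jar.load(COOKIE_FILE)            # restore cookies saved by a previous run

async def fetch(url):
    async with aiohttp.ClientSession(cookie_jar=jar) as session:
        async with session.get(url) as resp:
            body = await resp.text()
    jar.save(COOKIE_FILE)            # pickle the jar for the next run
    return body
```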
| {"golden_diff": "diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py\n--- a/aiohttp/cookiejar.py\n+++ b/aiohttp/cookiejar.py\n@@ -1,4 +1,6 @@\n import datetime\n+import pathlib\n+import pickle\n import re\n from collections import defaultdict\n from collections.abc import Mapping\n@@ -37,6 +39,16 @@\n self._next_expiration = ceil(self._loop.time())\n self._expirations = {}\n \n+ def save(self, file_path):\n+ file_path = pathlib.Path(file_path)\n+ with file_path.open(mode='wb') as f:\n+ pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)\n+\n+ def load(self, file_path):\n+ file_path = pathlib.Path(file_path)\n+ with file_path.open(mode='rb') as f:\n+ self._cookies = pickle.load(f)\n+\n def clear(self):\n self._cookies.clear()\n self._host_only_cookies.clear()\n", "issue": "Support load/save cookie_jar\nI found that cookie_jar do not provide any function to _load or save cookie_jar_\n\nIs there any pythonic way to implement these two method?\n\n```\nsession.cookie_jar.load('foo.cookie.txt')\n....# do sth.\nsession.cookie_jar.save()\n\n```\n\n", "before_files": [{"content": "import datetime\nimport re\nfrom collections import defaultdict\nfrom collections.abc import Mapping\nfrom http.cookies import Morsel, SimpleCookie\nfrom math import ceil\n\nfrom yarl import URL\n\nfrom .abc import AbstractCookieJar\nfrom .helpers import is_ip_address\n\n\nclass CookieJar(AbstractCookieJar):\n \"\"\"Implements cookie storage adhering to RFC 6265.\"\"\"\n\n DATE_TOKENS_RE = re.compile(\n \"[\\x09\\x20-\\x2F\\x3B-\\x40\\x5B-\\x60\\x7B-\\x7E]*\"\n \"(?P<token>[\\x00-\\x08\\x0A-\\x1F\\d:a-zA-Z\\x7F-\\xFF]+)\")\n\n DATE_HMS_TIME_RE = re.compile(\"(\\d{1,2}):(\\d{1,2}):(\\d{1,2})\")\n\n DATE_DAY_OF_MONTH_RE = re.compile(\"(\\d{1,2})\")\n\n DATE_MONTH_RE = re.compile(\"(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|\"\n \"(aug)|(sep)|(oct)|(nov)|(dec)\", re.I)\n\n DATE_YEAR_RE = re.compile(\"(\\d{2,4})\")\n\n MAX_TIME = 2051215261.0 # so far in future (2035-01-01)\n\n def __init__(self, *, unsafe=False, loop=None):\n super().__init__(loop=loop)\n self._cookies = defaultdict(SimpleCookie)\n self._host_only_cookies = set()\n self._unsafe = unsafe\n self._next_expiration = ceil(self._loop.time())\n self._expirations = {}\n\n def clear(self):\n self._cookies.clear()\n self._host_only_cookies.clear()\n self._next_expiration = ceil(self._loop.time())\n self._expirations.clear()\n\n def __iter__(self):\n self._do_expiration()\n for val in self._cookies.values():\n yield from val.values()\n\n def __len__(self):\n return sum(1 for i in self)\n\n def _do_expiration(self):\n now = self._loop.time()\n if self._next_expiration > now:\n return\n if not self._expirations:\n return\n next_expiration = self.MAX_TIME\n to_del = []\n cookies = self._cookies\n expirations = self._expirations\n for (domain, name), when in expirations.items():\n if when < now:\n cookies[domain].pop(name, None)\n to_del.append((domain, name))\n self._host_only_cookies.discard((domain, name))\n else:\n next_expiration = min(next_expiration, when)\n for key in to_del:\n del expirations[key]\n\n self._next_expiration = ceil(next_expiration)\n\n def _expire_cookie(self, when, domain, name):\n self._next_expiration = min(self._next_expiration, when)\n self._expirations[(domain, name)] = when\n\n def update_cookies(self, cookies, response_url=URL()):\n \"\"\"Update cookies.\"\"\"\n hostname = response_url.host\n\n if not self._unsafe and is_ip_address(hostname):\n # Don't accept cookies from IPs\n return\n\n if isinstance(cookies, Mapping):\n cookies = 
cookies.items()\n\n for name, cookie in cookies:\n if not isinstance(cookie, Morsel):\n tmp = SimpleCookie()\n tmp[name] = cookie\n cookie = tmp[name]\n\n domain = cookie[\"domain\"]\n\n # ignore domains with trailing dots\n if domain.endswith('.'):\n domain = \"\"\n del cookie[\"domain\"]\n\n if not domain and hostname is not None:\n # Set the cookie's domain to the response hostname\n # and set its host-only-flag\n self._host_only_cookies.add((hostname, name))\n domain = cookie[\"domain\"] = hostname\n\n if domain.startswith(\".\"):\n # Remove leading dot\n domain = domain[1:]\n cookie[\"domain\"] = domain\n\n if hostname and not self._is_domain_match(domain, hostname):\n # Setting cookies for different domains is not allowed\n continue\n\n path = cookie[\"path\"]\n if not path or not path.startswith(\"/\"):\n # Set the cookie's path to the response path\n path = response_url.path\n if not path.startswith(\"/\"):\n path = \"/\"\n else:\n # Cut everything from the last slash to the end\n path = \"/\" + path[1:path.rfind(\"/\")]\n cookie[\"path\"] = path\n\n max_age = cookie[\"max-age\"]\n if max_age:\n try:\n delta_seconds = int(max_age)\n self._expire_cookie(self._loop.time() + delta_seconds,\n domain, name)\n except ValueError:\n cookie[\"max-age\"] = \"\"\n\n else:\n expires = cookie[\"expires\"]\n if expires:\n expire_time = self._parse_date(expires)\n if expire_time:\n self._expire_cookie(expire_time.timestamp(),\n domain, name)\n else:\n cookie[\"expires\"] = \"\"\n\n # use dict method because SimpleCookie class modifies value\n # before Python 3.4.3\n dict.__setitem__(self._cookies[domain], name, cookie)\n\n self._do_expiration()\n\n def filter_cookies(self, request_url=URL()):\n \"\"\"Returns this jar's cookies filtered by their attributes.\"\"\"\n self._do_expiration()\n filtered = SimpleCookie()\n hostname = request_url.host or \"\"\n is_not_secure = request_url.scheme not in (\"https\", \"wss\")\n\n for cookie in self:\n name = cookie.key\n domain = cookie[\"domain\"]\n\n # Send shared cookies\n if not domain:\n filtered[name] = cookie.value\n continue\n\n if not self._unsafe and is_ip_address(hostname):\n continue\n\n if (domain, name) in self._host_only_cookies:\n if domain != hostname:\n continue\n elif not self._is_domain_match(domain, hostname):\n continue\n\n if not self._is_path_match(request_url.path, cookie[\"path\"]):\n continue\n\n if is_not_secure and cookie[\"secure\"]:\n continue\n\n filtered[name] = cookie.value\n\n return filtered\n\n @staticmethod\n def _is_domain_match(domain, hostname):\n \"\"\"Implements domain matching adhering to RFC 6265.\"\"\"\n if hostname == domain:\n return True\n\n if not hostname.endswith(domain):\n return False\n\n non_matching = hostname[:-len(domain)]\n\n if not non_matching.endswith(\".\"):\n return False\n\n return not is_ip_address(hostname)\n\n @staticmethod\n def _is_path_match(req_path, cookie_path):\n \"\"\"Implements path matching adhering to RFC 6265.\"\"\"\n if not req_path.startswith(\"/\"):\n req_path = \"/\"\n\n if req_path == cookie_path:\n return True\n\n if not req_path.startswith(cookie_path):\n return False\n\n if cookie_path.endswith(\"/\"):\n return True\n\n non_matching = req_path[len(cookie_path):]\n\n return non_matching.startswith(\"/\")\n\n @classmethod\n def _parse_date(cls, date_str):\n \"\"\"Implements date string parsing adhering to RFC 6265.\"\"\"\n if not date_str:\n return\n\n found_time = False\n found_day = False\n found_month = False\n found_year = False\n\n hour = minute = second = 0\n day = 
0\n month = 0\n year = 0\n\n for token_match in cls.DATE_TOKENS_RE.finditer(date_str):\n\n token = token_match.group(\"token\")\n\n if not found_time:\n time_match = cls.DATE_HMS_TIME_RE.match(token)\n if time_match:\n found_time = True\n hour, minute, second = [\n int(s) for s in time_match.groups()]\n continue\n\n if not found_day:\n day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)\n if day_match:\n found_day = True\n day = int(day_match.group())\n continue\n\n if not found_month:\n month_match = cls.DATE_MONTH_RE.match(token)\n if month_match:\n found_month = True\n month = month_match.lastindex\n continue\n\n if not found_year:\n year_match = cls.DATE_YEAR_RE.match(token)\n if year_match:\n found_year = True\n year = int(year_match.group())\n\n if 70 <= year <= 99:\n year += 1900\n elif 0 <= year <= 69:\n year += 2000\n\n if False in (found_day, found_month, found_year, found_time):\n return\n\n if not 1 <= day <= 31:\n return\n\n if year < 1601 or hour > 23 or minute > 59 or second > 59:\n return\n\n return datetime.datetime(year, month, day,\n hour, minute, second,\n tzinfo=datetime.timezone.utc)\n", "path": "aiohttp/cookiejar.py"}]} | 3,364 | 222 |
gh_patches_debug_23038 | rasdani/github-patches | git_diff | aws__aws-cli-2702 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cloudformation deploy does not honour tabs in JSON templates
Most of our templates use tabs for indentation, and everything was fine until we tried to feed a template to `aws cloudformation deploy`, which rejected it even though every other function in the AWS CLI works with the template just fine:
```
[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template
{
"CapabilitiesReason": "The following resource(s) require capabilities: [AWS::IAM::Role]",
"Description": "Playing with CodeCommit, CodeBuild, and CodeDeploy",
"Parameters": [
```
and the only function that fails to parse the template is:
```
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file codepipeline.template --capabilities CAPABILITY_IAM
while scanning for the next token
found character '\t' that cannot start any token
in "<string>", line 2, column 1:
"AWSTemplateFormatVersion": "20 ...
^
```
A quick fix is to replace tabs with spaces:
```
[galaxy@athena templates]$ sed 's,\t, ,g' codepipeline.template > c.template
[galaxy@athena templates]$ aws cloudformation deploy --stack-name "galaxy-ccc" --template-file c.template --capabilities CAPABILITY_IAM
Waiting for changeset to be created..
Waiting for stack create/update to complete
```
... but it would mean that we would need to fix all our templates, which are valid JSON, just to work around a bug in the tool! :(
</issue>
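
The incompatibility is easy to reproduce outside the CLI. The snippet below is a minimal, editorial sketch (the `template` string and `parse_template` helper are invented for illustration, not part of awscli): Python's `json` module accepts a tab-indented template, while PyYAML's `yaml.safe_load` fails with exactly the error quoted above, which suggests trying the strict JSON parser first and only falling back to YAML.

```python
import json
import yaml

# Tab-indented but perfectly valid JSON, like the reporter's template.
template = '{\n\t"AWSTemplateFormatVersion": "2010-09-09"\n}'

print(json.loads(template))  # works: {'AWSTemplateFormatVersion': '2010-09-09'}

try:
    yaml.safe_load(template)  # fails: a tab cannot start a YAML token
except yaml.YAMLError as exc:
    print(exc)


def parse_template(body):
    """Hypothetical helper: try the JSON parser first, fall back to YAML."""
    try:
        return json.loads(body)
    except ValueError:
        return yaml.safe_load(body)
```
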
<code>
[start of awscli/customizations/cloudformation/yamlhelper.py]
1 # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import yaml
15 from awscli.compat import six
16 from yaml.resolver import ScalarNode, SequenceNode
17
18
19 def intrinsics_multi_constructor(loader, tag_prefix, node):
20 """
21 YAML constructor to parse CloudFormation intrinsics.
22 This will return a dictionary with key being the instrinsic name
23 """
24
25 # Get the actual tag name excluding the first exclamation
26 tag = node.tag[1:]
27
28 # All CloudFormation intrinsics have prefix Fn:: except Ref
29 prefix = "Fn::"
30 if tag == "Ref":
31 prefix = ""
32
33 cfntag = prefix + tag
34
35 if tag == "GetAtt" and isinstance(node.value, six.string_types):
36 # ShortHand notation for !GetAtt accepts Resource.Attribute format
37 # while the standard notation is to use an array
38 # [Resource, Attribute]. Convert shorthand to standard format
39 value = node.value.split(".", 1)
40
41 elif isinstance(node, ScalarNode):
42 # Value of this node is scalar
43 value = loader.construct_scalar(node)
44
45 elif isinstance(node, SequenceNode):
46 # Value of this node is an array (Ex: [1,2])
47 value = loader.construct_sequence(node)
48
49 else:
50 # Value of this node is an mapping (ex: {foo: bar})
51 value = loader.construct_mapping(node)
52
53 return {cfntag: value}
54
55
56 def yaml_dump(dict_to_dump):
57 """
58 Dumps the dictionary as a YAML document
59 :param dict_to_dump:
60 :return:
61 """
62 return yaml.safe_dump(dict_to_dump, default_flow_style=False)
63
64
65 def yaml_parse(yamlstr):
66
67 yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
68
69 return yaml.safe_load(yamlstr)
70
[end of awscli/customizations/cloudformation/yamlhelper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py
--- a/awscli/customizations/cloudformation/yamlhelper.py
+++ b/awscli/customizations/cloudformation/yamlhelper.py
@@ -10,11 +10,12 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-
+import json
import yaml
-from awscli.compat import six
from yaml.resolver import ScalarNode, SequenceNode
+from awscli.compat import six
+
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
@@ -63,7 +64,13 @@
def yaml_parse(yamlstr):
-
- yaml.SafeLoader.add_multi_constructor("!", intrinsics_multi_constructor)
-
- return yaml.safe_load(yamlstr)
+ """Parse a yaml string"""
+ try:
+ # PyYAML doesn't support json as well as it should, so if the input
+ # is actually just json it is better to parse it with the standard
+ # json parser.
+ return json.loads(yamlstr)
+ except ValueError:
+ yaml.SafeLoader.add_multi_constructor(
+ "!", intrinsics_multi_constructor)
+ return yaml.safe_load(yamlstr)
| {"golden_diff": "diff --git a/awscli/customizations/cloudformation/yamlhelper.py b/awscli/customizations/cloudformation/yamlhelper.py\n--- a/awscli/customizations/cloudformation/yamlhelper.py\n+++ b/awscli/customizations/cloudformation/yamlhelper.py\n@@ -10,11 +10,12 @@\n # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n # ANY KIND, either express or implied. See the License for the specific\n # language governing permissions and limitations under the License.\n-\n+import json\n import yaml\n-from awscli.compat import six\n from yaml.resolver import ScalarNode, SequenceNode\n \n+from awscli.compat import six\n+\n \n def intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n@@ -63,7 +64,13 @@\n \n \n def yaml_parse(yamlstr):\n-\n- yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n-\n- return yaml.safe_load(yamlstr)\n+ \"\"\"Parse a yaml string\"\"\"\n+ try:\n+ # PyYAML doesn't support json as well as it should, so if the input\n+ # is actually just json it is better to parse it with the standard\n+ # json parser.\n+ return json.loads(yamlstr)\n+ except ValueError:\n+ yaml.SafeLoader.add_multi_constructor(\n+ \"!\", intrinsics_multi_constructor)\n+ return yaml.safe_load(yamlstr)\n", "issue": "cloudformation deploy does not honour tabs in JSON templates\nMost of our templates are using tabs for the indentation and everything was fine until we tried to feed the template to `aws cloudformation deploy` which did not like the template despite that every other function in AWS CLI works with the template just fine:\r\n\r\n```\r\n[galaxy@athena templates]$ aws cloudformation validate-template --template-body file://codepipeline.template\r\n{\r\n \"CapabilitiesReason\": \"The following resource(s) require capabilities: [AWS::IAM::Role]\", \r\n \"Description\": \"Playing with CodeCommit, CodeBuild, and CodeDeploy\", \r\n \"Parameters\": [\r\n```\r\nand the only function that fails to parse the template is:\r\n```\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file codepipeline.template --capabilities CAPABILITY_IAM\r\n\r\nwhile scanning for the next token\r\nfound character '\\t' that cannot start any token\r\n in \"<string>\", line 2, column 1:\r\n \t\"AWSTemplateFormatVersion\": \"20 ... \r\n ^\r\n```\r\nA quick fix is to replace tabs with spaces:\r\n```\r\n[galaxy@athena templates]$ sed 's,\\t, ,g' codepipeline.template > c.template\r\n[galaxy@athena templates]$ aws cloudformation deploy --stack-name \"galaxy-ccc\" --template-file c.template --capabilities CAPABILITY_IAM\r\nWaiting for changeset to be created..\r\nWaiting for stack create/update to complete\r\n```\r\n\r\n... but it would mean that we would need to fix all our templates which are valid JSON just to workaround a bug in the tool! :(\n", "before_files": [{"content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport yaml\nfrom awscli.compat import six\nfrom yaml.resolver import ScalarNode, SequenceNode\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n \"\"\"\n YAML constructor to parse CloudFormation intrinsics.\n This will return a dictionary with key being the instrinsic name\n \"\"\"\n\n # Get the actual tag name excluding the first exclamation\n tag = node.tag[1:]\n\n # All CloudFormation intrinsics have prefix Fn:: except Ref\n prefix = \"Fn::\"\n if tag == \"Ref\":\n prefix = \"\"\n\n cfntag = prefix + tag\n\n if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n # ShortHand notation for !GetAtt accepts Resource.Attribute format\n # while the standard notation is to use an array\n # [Resource, Attribute]. Convert shorthand to standard format\n value = node.value.split(\".\", 1)\n\n elif isinstance(node, ScalarNode):\n # Value of this node is scalar\n value = loader.construct_scalar(node)\n\n elif isinstance(node, SequenceNode):\n # Value of this node is an array (Ex: [1,2])\n value = loader.construct_sequence(node)\n\n else:\n # Value of this node is an mapping (ex: {foo: bar})\n value = loader.construct_mapping(node)\n\n return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n \"\"\"\n Dumps the dictionary as a YAML document\n :param dict_to_dump:\n :return:\n \"\"\"\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n\n yaml.SafeLoader.add_multi_constructor(\"!\", intrinsics_multi_constructor)\n\n return yaml.safe_load(yamlstr)\n", "path": "awscli/customizations/cloudformation/yamlhelper.py"}]} | 1,538 | 304 |
gh_patches_debug_9712 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2733 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/search and /filter are currently broken
Any search or filter is currently failing.
https://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a
https://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511
Logs:
```
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index'
```
This happens when doing `disk_stat = os.statvfs(path)` here:
https://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78
</issue>
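
The traceback above comes from `os.statvfs` being called on a directory that has not been created yet. Below is a minimal sketch (not the project's code) of a defensive version of the check; it uses `os.makedirs` only so the snippet is self-contained, whereas the project has its own `init_dir` helper for this.

```python
import os


def check_available_disk_space(path: str, required_space: int) -> None:
    try:
        disk_stat = os.statvfs(path)  # POSIX-only filesystem statistics
    except FileNotFoundError:
        # The index directory does not exist yet: create it and retry.
        os.makedirs(path, exist_ok=True)
        disk_stat = os.statvfs(path)
    free_space = disk_stat.f_bavail * disk_stat.f_frsize
    if free_space < required_space:
        raise RuntimeError("Not enough disk space for the index download")
```
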
<code>
[start of libs/libapi/src/libapi/duckdb.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2023 The HuggingFace Authors.
3
4 import errno
5 import json
6 import logging
7 import os
8 import re
9 from hashlib import sha1
10 from typing import Optional
11
12 import anyio
13 from anyio import Path
14 from libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND
15 from libcommon.parquet_utils import extract_split_name_from_parquet_url
16 from libcommon.prometheus import StepProfiler
17 from libcommon.simple_cache import CacheEntry
18 from libcommon.storage import StrPath, init_dir
19 from libcommon.storage_client import StorageClient
20 from libcommon.utils import download_file_from_hub
21
22 from libapi.exceptions import DownloadIndexError
23 from libapi.utils import get_cache_entry_from_step
24
25 REPO_TYPE = "dataset"
26 HUB_DOWNLOAD_CACHE_FOLDER = "cache"
27
28
29 async def get_index_file_location_and_download_if_missing(
30 duckdb_index_file_directory: StrPath,
31 dataset: str,
32 revision: str,
33 config: str,
34 split: str,
35 filename: str,
36 size_bytes: int,
37 url: str,
38 target_revision: str,
39 hf_token: Optional[str],
40 ) -> str:
41 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="all"):
42 index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)
43 # For directories like "partial-train" for the file
44 # at "en/partial-train/0000.parquet" in the C4 dataset.
45 # Note that "-" is forbidden for split names, so it doesn't create directory names collisions.
46 split_directory = extract_split_name_from_parquet_url(url)
47 repo_file_location = f"{config}/{split_directory}/{filename}"
48 index_file_location = f"{index_folder}/{repo_file_location}"
49 index_path = Path(index_file_location)
50 if not await index_path.is_file():
51 with StepProfiler(method="get_index_file_location_and_download_if_missing", step="download index file"):
52 cache_folder = f"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}"
53 await anyio.to_thread.run_sync(
54 download_index_file,
55 cache_folder,
56 index_folder,
57 target_revision,
58 dataset,
59 repo_file_location,
60 hf_token,
61 )
62 # Update its modification time
63 await index_path.touch()
64 return index_file_location
65
66
67 def get_download_folder(
68 root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str
69 ) -> str:
70 check_available_disk_space(root_directory, size_bytes)
71 payload = (dataset, config, split, revision)
72 hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]
73 subdirectory = "".join([c if re.match(r"[\w-]", c) else "-" for c in f"{dataset}-{hash_suffix}"])
74 return f"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}"
75
76
77 def check_available_disk_space(path: StrPath, required_space: int) -> None:
78 disk_stat = os.statvfs(path)
79 # Calculate free space in bytes
80 free_space = disk_stat.f_bavail * disk_stat.f_frsize
81 logging.debug(f"{free_space} available space, needed {required_space}")
82 if free_space < required_space:
83 raise DownloadIndexError(
84 "Cannot perform the search due to a lack of disk space on the server. Please report the issue."
85 )
86
87
88 def download_index_file(
89 cache_folder: str,
90 index_folder: str,
91 target_revision: str,
92 dataset: str,
93 repo_file_location: str,
94 hf_token: Optional[str] = None,
95 ) -> None:
96 logging.info(f"init_dir {index_folder}")
97 try:
98 init_dir(index_folder)
99 download_file_from_hub(
100 repo_type=REPO_TYPE,
101 revision=target_revision,
102 repo_id=dataset,
103 filename=repo_file_location,
104 local_dir=index_folder,
105 hf_token=hf_token,
106 cache_dir=cache_folder,
107 )
108 except OSError as err:
109 if err.errno == errno.ENOSPC:
110 raise DownloadIndexError(
111 "Cannot perform the operation due to a lack of disk space on the server. Please report the issue.", err
112 )
113
114
115 def get_cache_entry_from_duckdb_index_job(
116 dataset: str,
117 config: str,
118 split: str,
119 hf_endpoint: str,
120 hf_token: Optional[str],
121 hf_timeout_seconds: Optional[float],
122 blocked_datasets: list[str],
123 storage_clients: Optional[list[StorageClient]] = None,
124 ) -> CacheEntry:
125 return get_cache_entry_from_step(
126 processing_step_name=SPLIT_DUCKDB_INDEX_KIND,
127 dataset=dataset,
128 config=config,
129 split=split,
130 hf_endpoint=hf_endpoint,
131 hf_token=hf_token,
132 hf_timeout_seconds=hf_timeout_seconds,
133 blocked_datasets=blocked_datasets,
134 storage_clients=storage_clients,
135 )
136
[end of libs/libapi/src/libapi/duckdb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py
--- a/libs/libapi/src/libapi/duckdb.py
+++ b/libs/libapi/src/libapi/duckdb.py
@@ -75,7 +75,12 @@
def check_available_disk_space(path: StrPath, required_space: int) -> None:
- disk_stat = os.statvfs(path)
+ try:
+ disk_stat = os.statvfs(path)
+ except FileNotFoundError:
+ # The path does not exist, we create it and
+ init_dir(path)
+ disk_stat = os.statvfs(path)
# Calculate free space in bytes
free_space = disk_stat.f_bavail * disk_stat.f_frsize
logging.debug(f"{free_space} available space, needed {required_space}")
| {"golden_diff": "diff --git a/libs/libapi/src/libapi/duckdb.py b/libs/libapi/src/libapi/duckdb.py\n--- a/libs/libapi/src/libapi/duckdb.py\n+++ b/libs/libapi/src/libapi/duckdb.py\n@@ -75,7 +75,12 @@\n \n \n def check_available_disk_space(path: StrPath, required_space: int) -> None:\n- disk_stat = os.statvfs(path)\n+ try:\n+ disk_stat = os.statvfs(path)\n+ except FileNotFoundError:\n+ # The path does not exist, we create it and\n+ init_dir(path)\n+ disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n", "issue": "/search and /filter are currently broken\nAny search or filter is currently failing.\r\n\r\nhttps://datasets-server.huggingface.co/search?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&query=a\r\nhttps://datasets-server.huggingface.co/filter?dataset=gsarti%2Fflores_101&config=afr&split=devtest&offset=0&length=100&where=id%3E%3D409+and+id%3C511\r\n\r\nLogs:\r\n\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: '/tmp/duckdb-index'\r\n```\r\n\r\nwhen doing `disk_stat = os.statvfs(path)` here: \r\n\r\nhttps://github.com/huggingface/dataset-viewer/blob/a489c0b6ad4e5a78b2670679abbfab93f9be5742/libs/libapi/src/libapi/duckdb.py#L78\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2023 The HuggingFace Authors.\n\nimport errno\nimport json\nimport logging\nimport os\nimport re\nfrom hashlib import sha1\nfrom typing import Optional\n\nimport anyio\nfrom anyio import Path\nfrom libcommon.constants import DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY, SPLIT_DUCKDB_INDEX_KIND\nfrom libcommon.parquet_utils import extract_split_name_from_parquet_url\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CacheEntry\nfrom libcommon.storage import StrPath, init_dir\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.utils import download_file_from_hub\n\nfrom libapi.exceptions import DownloadIndexError\nfrom libapi.utils import get_cache_entry_from_step\n\nREPO_TYPE = \"dataset\"\nHUB_DOWNLOAD_CACHE_FOLDER = \"cache\"\n\n\nasync def get_index_file_location_and_download_if_missing(\n duckdb_index_file_directory: StrPath,\n dataset: str,\n revision: str,\n config: str,\n split: str,\n filename: str,\n size_bytes: int,\n url: str,\n target_revision: str,\n hf_token: Optional[str],\n) -> str:\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"all\"):\n index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision)\n # For directories like \"partial-train\" for the file\n # at \"en/partial-train/0000.parquet\" in the C4 dataset.\n # Note that \"-\" is forbidden for split names, so it doesn't create directory names collisions.\n split_directory = extract_split_name_from_parquet_url(url)\n repo_file_location = f\"{config}/{split_directory}/{filename}\"\n index_file_location = f\"{index_folder}/{repo_file_location}\"\n index_path = Path(index_file_location)\n if not await index_path.is_file():\n with StepProfiler(method=\"get_index_file_location_and_download_if_missing\", step=\"download index file\"):\n cache_folder = f\"{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}\"\n await anyio.to_thread.run_sync(\n download_index_file,\n cache_folder,\n index_folder,\n target_revision,\n dataset,\n repo_file_location,\n hf_token,\n )\n # Update its modification time\n await 
index_path.touch()\n return index_file_location\n\n\ndef get_download_folder(\n root_directory: StrPath, size_bytes: int, dataset: str, revision: str, config: str, split: str\n) -> str:\n check_available_disk_space(root_directory, size_bytes)\n payload = (dataset, config, split, revision)\n hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8]\n subdirectory = \"\".join([c if re.match(r\"[\\w-]\", c) else \"-\" for c in f\"{dataset}-{hash_suffix}\"])\n return f\"{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}\"\n\n\ndef check_available_disk_space(path: StrPath, required_space: int) -> None:\n disk_stat = os.statvfs(path)\n # Calculate free space in bytes\n free_space = disk_stat.f_bavail * disk_stat.f_frsize\n logging.debug(f\"{free_space} available space, needed {required_space}\")\n if free_space < required_space:\n raise DownloadIndexError(\n \"Cannot perform the search due to a lack of disk space on the server. Please report the issue.\"\n )\n\n\ndef download_index_file(\n cache_folder: str,\n index_folder: str,\n target_revision: str,\n dataset: str,\n repo_file_location: str,\n hf_token: Optional[str] = None,\n) -> None:\n logging.info(f\"init_dir {index_folder}\")\n try:\n init_dir(index_folder)\n download_file_from_hub(\n repo_type=REPO_TYPE,\n revision=target_revision,\n repo_id=dataset,\n filename=repo_file_location,\n local_dir=index_folder,\n hf_token=hf_token,\n cache_dir=cache_folder,\n )\n except OSError as err:\n if err.errno == errno.ENOSPC:\n raise DownloadIndexError(\n \"Cannot perform the operation due to a lack of disk space on the server. Please report the issue.\", err\n )\n\n\ndef get_cache_entry_from_duckdb_index_job(\n dataset: str,\n config: str,\n split: str,\n hf_endpoint: str,\n hf_token: Optional[str],\n hf_timeout_seconds: Optional[float],\n blocked_datasets: list[str],\n storage_clients: Optional[list[StorageClient]] = None,\n) -> CacheEntry:\n return get_cache_entry_from_step(\n processing_step_name=SPLIT_DUCKDB_INDEX_KIND,\n dataset=dataset,\n config=config,\n split=split,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n blocked_datasets=blocked_datasets,\n storage_clients=storage_clients,\n )\n", "path": "libs/libapi/src/libapi/duckdb.py"}]} | 2,161 | 183 |
gh_patches_debug_10519 | rasdani/github-patches | git_diff | ansible__ansible-34845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aci_epg_to_domain: Module does not need to support OpenStack
_From @nilloBE on October 10, 2017 9:11_
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
aci_epg_to_domain
##### ANSIBLE VERSION
v2.5
##### SUMMARY
The module aci_epg_to_domain allows mapping an EPG to a Physical or Virtual Domain in ACI. The Virtual Domain lets the user specify the domain type, selecting from VMware, Microsoft, or OpenStack.
OpenStack should be removed from these choices because OpenStack EPGs should not be created or manipulated directly via Ansible; they are managed via Neutron instead.
_Copied from original issue: datacenter/aci-ansible#163_
</issue>
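
As a purely illustrative sketch (the helper below is invented and is not the project's actual change), one way to express the constraint from the report is to reject write operations when the OpenStack provider is selected while still allowing read-only queries:

```python
def reject_openstack_writes(params):
    """Hypothetical guard: OpenStack VMM domains are owned by the Neutron plugin."""
    if (
        params.get("domain_type") == "vmm"
        and params.get("vm_provider") == "openstack"
        and params.get("state") != "query"
    ):
        raise ValueError(
            "OpenStack EPG bindings must be managed by Neutron; only state=query is allowed"
        )


# Example: this combination would be rejected.
try:
    reject_openstack_writes(
        {"domain_type": "vmm", "vm_provider": "openstack", "state": "present"}
    )
except ValueError as err:
    print(err)
```
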
<code>
[start of lib/ansible/modules/network/aci/aci_epg_to_domain.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7 __metaclass__ = type
8
9 ANSIBLE_METADATA = {'metadata_version': '1.1',
10 'status': ['preview'],
11 'supported_by': 'community'}
12
13 DOCUMENTATION = r'''
14 ---
15 module: aci_epg_to_domain
16 short_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)
17 description:
18 - Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.
19 - More information from the internal APIC class I(fv:RsDomAtt) at
20 U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).
21 author:
22 - Jacob McGill (@jmcgill298)
23 version_added: '2.4'
24 notes:
25 - The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.
26 The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.
27 options:
28 allow_useg:
29 description:
30 - Allows micro-segmentation.
31 - The APIC defaults new EPG to Domain bindings to use C(encap).
32 choices: [ encap, useg ]
33 default: encap
34 ap:
35 description:
36 - Name of an existing application network profile, that will contain the EPGs.
37 aliases: [ app_profile, app_profile_name ]
38 deploy_immediacy:
39 description:
40 - Determines when the policy is pushed to hardware Policy CAM.
41 - The APIC defaults new EPG to Domain bindings to C(lazy).
42 choices: [ immediate, lazy ]
43 default: lazy
44 domain:
45 description:
46 - Name of the physical or virtual domain being associated with the EPG.
47 aliases: [ domain_name, domain_profile ]
48 domain_type:
49 description:
50 - Determines if the Domain is physical (phys) or virtual (vmm).
51 choices: [ phys, vmm ]
52 aliases: [ type ]
53 encap:
54 description:
55 - The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.
56 - This acts as the secondary encap when using useg.
57 choices: [ range from 1 to 4096 ]
58 encap_mode:
59 description:
60 - The ecapsulataion method to be used.
61 - The APIC defaults new EPG to Domain bindings to C(auto).
62 choices: [ auto, vlan, vxlan ]
63 default: auto
64 epg:
65 description:
66 - Name of the end point group.
67 aliases: [ epg_name ]
68 netflow:
69 description:
70 - Determines if netflow should be enabled.
71 - The APIC defaults new EPG to Domain binings to C(disabled).
72 choices: [ disabled, enabled ]
73 default: disabled
74 primary_encap:
75 description:
76 - Determines the primary VLAN ID when using useg.
77 choices: [ range from 1 to 4096 ]
78 resolution_immediacy:
79 description:
80 - Determines when the policies should be resolved and available.
81 - The APIC defaults new EPG to Domain bindings to C(lazy).
82 choices: [ immediate, lazy, pre-provision ]
83 default: lazy
84 state:
85 description:
86 - Use C(present) or C(absent) for adding or removing.
87 - Use C(query) for listing an object or multiple objects.
88 choices: [ absent, present, query ]
89 default: present
90 tenant:
91 description:
92 - Name of an existing tenant.
93 aliases: [ tenant_name ]
94 vm_provider:
95 description:
96 - The VM platform for VMM Domains.
97 choices: [ microsoft, openstack, vmware ]
98 extends_documentation_fragment: aci
99 '''
100
101 EXAMPLES = r''' # '''
102
103 RETURN = r''' # '''
104
105 from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
106 from ansible.module_utils.basic import AnsibleModule
107
108 VM_PROVIDER_MAPPING = dict(
109 microsoft="uni/vmmp-Microsoft/dom-",
110 openstack="uni/vmmp-OpenStack/dom-",
111 vmware="uni/vmmp-VMware/dom-",
112 )
113
114
115 def main():
116 argument_spec = aci_argument_spec
117 argument_spec.update(
118 allow_useg=dict(type='str', choices=['encap', 'useg']),
119 ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
120 deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']),
121 domain=dict(type='str', aliases=['domain_name', 'domain_profile']),
122 domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),
123 encap=dict(type='int'),
124 encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),
125 epg=dict(type='str', aliases=['name', 'epg_name']),
126 netflow=dict(type='str', choices=['disabled', 'enabled']),
127 primary_encap=dict(type='int'),
128 resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),
129 state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
130 tenant=dict(type='str', aliases=['tenant_name']),
131 vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),
132 method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
133 )
134
135 module = AnsibleModule(
136 argument_spec=argument_spec,
137 supports_check_mode=True,
138 required_if=[
139 ['domain_type', 'vmm', ['vm_provider']],
140 ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
141 ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
142 ],
143 )
144
145 allow_useg = module.params['allow_useg']
146 ap = module.params['ap']
147 deploy_immediacy = module.params['deploy_immediacy']
148 domain = module.params['domain']
149 domain_type = module.params['domain_type']
150 vm_provider = module.params['vm_provider']
151 encap = module.params['encap']
152 if encap is not None:
153 if encap in range(1, 4097):
154 encap = 'vlan-{0}'.format(encap)
155 else:
156 module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')
157 encap_mode = module.params['encap_mode']
158 epg = module.params['epg']
159 netflow = module.params['netflow']
160 primary_encap = module.params['primary_encap']
161 if primary_encap is not None:
162 if primary_encap in range(1, 4097):
163 primary_encap = 'vlan-{0}'.format(primary_encap)
164 else:
165 module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')
166 resolution_immediacy = module.params['resolution_immediacy']
167 state = module.params['state']
168 tenant = module.params['tenant']
169
170 if domain_type == 'phys' and vm_provider is not None:
171 module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'")
172
173 # Compile the full domain for URL building
174 if domain_type == 'vmm':
175 epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
176 elif domain_type is not None:
177 epg_domain = 'uni/phys-{0}'.format(domain)
178 else:
179 epg_domain = None
180
181 aci = ACIModule(module)
182 aci.construct_url(
183 root_class=dict(
184 aci_class='fvTenant',
185 aci_rn='tn-{0}'.format(tenant),
186 filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
187 module_object=tenant,
188 ),
189 subclass_1=dict(
190 aci_class='fvAp',
191 aci_rn='ap-{0}'.format(ap),
192 filter_target='eq(fvAp.name, "{0}")'.format(ap),
193 module_object=ap,
194 ),
195 subclass_2=dict(
196 aci_class='fvAEPg',
197 aci_rn='epg-{0}'.format(epg),
198 filter_target='eq(fvTenant.name, "{0}")'.format(epg),
199 module_object=epg,
200 ),
201 subclass_3=dict(
202 aci_class='fvRsDomAtt',
203 aci_rn='rsdomAtt-[{0}]'.format(epg_domain),
204 filter_target='eq(fvRsDomAtt.tDn, "{0}")'.format(epg_domain),
205 module_object=epg_domain,
206 ),
207 )
208
209 aci.get_existing()
210
211 if state == 'present':
212 # Filter out module parameters with null values
213 aci.payload(
214 aci_class='fvRsDomAtt',
215 class_config=dict(
216 classPref=allow_useg,
217 encap=encap,
218 encapMode=encap_mode,
219 instrImedcy=deploy_immediacy,
220 netflowPref=netflow,
221 primaryEncap=primary_encap,
222 resImedcy=resolution_immediacy,
223 ),
224 )
225
226 # Generate config diff which will be used as POST request body
227 aci.get_diff(aci_class='fvRsDomAtt')
228
229 # Submit changes if module not in check_mode and the proposed is different than existing
230 aci.post_config()
231
232 elif state == 'absent':
233 aci.delete_config()
234
235 module.exit_json(**aci.result)
236
237
238 if __name__ == "__main__":
239 main()
240
[end of lib/ansible/modules/network/aci/aci_epg_to_domain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/modules/network/aci/aci_epg_to_domain.py b/lib/ansible/modules/network/aci/aci_epg_to_domain.py
--- a/lib/ansible/modules/network/aci/aci_epg_to_domain.py
+++ b/lib/ansible/modules/network/aci/aci_epg_to_domain.py
@@ -24,6 +24,9 @@
notes:
- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.
The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.
+- OpenStack VMM domains must not be created using this module. The OpenStack VMM domain is created directly
+ by the Cisco APIC Neutron plugin as part of the installation and configuration.
+ This module can be used to query status of an OpenStack VMM domain.
options:
allow_useg:
description:
| {"golden_diff": "diff --git a/lib/ansible/modules/network/aci/aci_epg_to_domain.py b/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n--- a/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n+++ b/lib/ansible/modules/network/aci/aci_epg_to_domain.py\n@@ -24,6 +24,9 @@\n notes:\n - The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.\n The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.\n+- OpenStack VMM domains must not be created using this module. The OpenStack VMM domain is created directly\n+ by the Cisco APIC Neutron plugin as part of the installation and configuration.\n+ This module can be used to query status of an OpenStack VMM domain.\n options:\n allow_useg:\n description:\n", "issue": "aci_epg_to_domain: Module does not need to support OpenStack\n_From @nilloBE on October 10, 2017 9:11_\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naci_epg_to_domain\r\n\r\n##### ANSIBLE VERSION\r\nv2.5\r\n\r\n##### SUMMARY\r\nThe module aci_epg_to_domain allows mapping an EPG to a Physical or Virtual Domain in ACI. The Virtual Domain allows the user to specify the domain type selecting VMware, Microsoft or OpenStack.\r\n\r\nIn this setting, OpenStack should be removed because the OpenStack EPGs should not be created or manipulated directly via Ansible, but instead via Neutron. \r\n\r\n_Copied from original issue: datacenter/aci-ansible#163_\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: aci_epg_to_domain\nshort_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)\ndescription:\n- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.\n- More information from the internal APIC class I(fv:RsDomAtt) at\n U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).\nauthor:\n- Jacob McGill (@jmcgill298)\nversion_added: '2.4'\nnotes:\n- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.\n The M(aci_tenant) M(aci_ap), M(aci_epg) M(aci_domain) modules can be used for this.\noptions:\n allow_useg:\n description:\n - Allows micro-segmentation.\n - The APIC defaults new EPG to Domain bindings to use C(encap).\n choices: [ encap, useg ]\n default: encap\n ap:\n description:\n - Name of an existing application network profile, that will contain the EPGs.\n aliases: [ app_profile, app_profile_name ]\n deploy_immediacy:\n description:\n - Determines when the policy is pushed to hardware Policy CAM.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy ]\n default: lazy\n domain:\n description:\n - Name of the physical or virtual domain being associated with the EPG.\n aliases: [ domain_name, domain_profile ]\n domain_type:\n description:\n - Determines if the Domain is physical (phys) or virtual (vmm).\n choices: [ phys, vmm ]\n aliases: [ type ]\n encap:\n description:\n - The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.\n - This acts as the secondary encap when using useg.\n choices: [ range from 1 to 4096 ]\n encap_mode:\n description:\n - The ecapsulataion method to be used.\n - The 
APIC defaults new EPG to Domain bindings to C(auto).\n choices: [ auto, vlan, vxlan ]\n default: auto\n epg:\n description:\n - Name of the end point group.\n aliases: [ epg_name ]\n netflow:\n description:\n - Determines if netflow should be enabled.\n - The APIC defaults new EPG to Domain binings to C(disabled).\n choices: [ disabled, enabled ]\n default: disabled\n primary_encap:\n description:\n - Determines the primary VLAN ID when using useg.\n choices: [ range from 1 to 4096 ]\n resolution_immediacy:\n description:\n - Determines when the policies should be resolved and available.\n - The APIC defaults new EPG to Domain bindings to C(lazy).\n choices: [ immediate, lazy, pre-provision ]\n default: lazy\n state:\n description:\n - Use C(present) or C(absent) for adding or removing.\n - Use C(query) for listing an object or multiple objects.\n choices: [ absent, present, query ]\n default: present\n tenant:\n description:\n - Name of an existing tenant.\n aliases: [ tenant_name ]\n vm_provider:\n description:\n - The VM platform for VMM Domains.\n choices: [ microsoft, openstack, vmware ]\nextends_documentation_fragment: aci\n'''\n\nEXAMPLES = r''' # '''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec\nfrom ansible.module_utils.basic import AnsibleModule\n\nVM_PROVIDER_MAPPING = dict(\n microsoft=\"uni/vmmp-Microsoft/dom-\",\n openstack=\"uni/vmmp-OpenStack/dom-\",\n vmware=\"uni/vmmp-VMware/dom-\",\n)\n\n\ndef main():\n argument_spec = aci_argument_spec\n argument_spec.update(\n allow_useg=dict(type='str', choices=['encap', 'useg']),\n ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),\n deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']),\n domain=dict(type='str', aliases=['domain_name', 'domain_profile']),\n domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),\n encap=dict(type='int'),\n encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),\n epg=dict(type='str', aliases=['name', 'epg_name']),\n netflow=dict(type='str', choices=['disabled', 'enabled']),\n primary_encap=dict(type='int'),\n resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),\n state=dict(type='str', default='present', choices=['absent', 'present', 'query']),\n tenant=dict(type='str', aliases=['tenant_name']),\n vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),\n method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ['domain_type', 'vmm', ['vm_provider']],\n ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],\n ],\n )\n\n allow_useg = module.params['allow_useg']\n ap = module.params['ap']\n deploy_immediacy = module.params['deploy_immediacy']\n domain = module.params['domain']\n domain_type = module.params['domain_type']\n vm_provider = module.params['vm_provider']\n encap = module.params['encap']\n if encap is not None:\n if encap in range(1, 4097):\n encap = 'vlan-{0}'.format(encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n encap_mode = module.params['encap_mode']\n epg = module.params['epg']\n netflow = module.params['netflow']\n primary_encap = module.params['primary_encap']\n if primary_encap is not None:\n if 
primary_encap in range(1, 4097):\n primary_encap = 'vlan-{0}'.format(primary_encap)\n else:\n module.fail_json(msg='Valid VLAN assigments are from 1 to 4096')\n resolution_immediacy = module.params['resolution_immediacy']\n state = module.params['state']\n tenant = module.params['tenant']\n\n if domain_type == 'phys' and vm_provider is not None:\n module.fail_json(msg=\"Domain type 'phys' cannot have a 'vm_provider'\")\n\n # Compile the full domain for URL building\n if domain_type == 'vmm':\n epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)\n elif domain_type is not None:\n epg_domain = 'uni/phys-{0}'.format(domain)\n else:\n epg_domain = None\n\n aci = ACIModule(module)\n aci.construct_url(\n root_class=dict(\n aci_class='fvTenant',\n aci_rn='tn-{0}'.format(tenant),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(tenant),\n module_object=tenant,\n ),\n subclass_1=dict(\n aci_class='fvAp',\n aci_rn='ap-{0}'.format(ap),\n filter_target='eq(fvAp.name, \"{0}\")'.format(ap),\n module_object=ap,\n ),\n subclass_2=dict(\n aci_class='fvAEPg',\n aci_rn='epg-{0}'.format(epg),\n filter_target='eq(fvTenant.name, \"{0}\")'.format(epg),\n module_object=epg,\n ),\n subclass_3=dict(\n aci_class='fvRsDomAtt',\n aci_rn='rsdomAtt-[{0}]'.format(epg_domain),\n filter_target='eq(fvRsDomAtt.tDn, \"{0}\")'.format(epg_domain),\n module_object=epg_domain,\n ),\n )\n\n aci.get_existing()\n\n if state == 'present':\n # Filter out module parameters with null values\n aci.payload(\n aci_class='fvRsDomAtt',\n class_config=dict(\n classPref=allow_useg,\n encap=encap,\n encapMode=encap_mode,\n instrImedcy=deploy_immediacy,\n netflowPref=netflow,\n primaryEncap=primary_encap,\n resImedcy=resolution_immediacy,\n ),\n )\n\n # Generate config diff which will be used as POST request body\n aci.get_diff(aci_class='fvRsDomAtt')\n\n # Submit changes if module not in check_mode and the proposed is different than existing\n aci.post_config()\n\n elif state == 'absent':\n aci.delete_config()\n\n module.exit_json(**aci.result)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/ansible/modules/network/aci/aci_epg_to_domain.py"}]} | 3,511 | 211 |
gh_patches_debug_15678 | rasdani/github-patches | git_diff | celery__celery-8338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
database backend does not store children
The [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.
</issue>
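
For context, the fix shown in the diff further down this record persists the children alongside the other task metadata. The cut-down model below is an editorial sketch of where such a column would live, assuming SQLAlchemy 1.4+ for `declarative_base`; it is not the project's actual module.

```python
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base
from sqlalchemy.types import PickleType

Base = declarative_base()


class Task(Base):
    """Trimmed-down copy of the result model with the missing column included."""

    __tablename__ = "celery_taskmeta"

    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    task_id = sa.Column(sa.String(155), unique=True)
    result = sa.Column(PickleType, nullable=True)
    # Persist serialized child results so AsyncResult.children can be rebuilt.
    children = sa.Column(PickleType, nullable=True)

    def to_dict(self):
        return {
            "task_id": self.task_id,
            "result": self.result,
            "children": self.children,
        }
```
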
<code>
[start of celery/backends/database/models.py]
1 """Database models used by the SQLAlchemy result store backend."""
2 from datetime import datetime
3
4 import sqlalchemy as sa
5 from sqlalchemy.types import PickleType
6
7 from celery import states
8
9 from .session import ResultModelBase
10
11 __all__ = ('Task', 'TaskExtended', 'TaskSet')
12
13
14 class Task(ResultModelBase):
15 """Task result/status."""
16
17 __tablename__ = 'celery_taskmeta'
18 __table_args__ = {'sqlite_autoincrement': True}
19
20 id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
21 primary_key=True, autoincrement=True)
22 task_id = sa.Column(sa.String(155), unique=True)
23 status = sa.Column(sa.String(50), default=states.PENDING)
24 result = sa.Column(PickleType, nullable=True)
25 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
26 onupdate=datetime.utcnow, nullable=True)
27 traceback = sa.Column(sa.Text, nullable=True)
28
29 def __init__(self, task_id):
30 self.task_id = task_id
31
32 def to_dict(self):
33 return {
34 'task_id': self.task_id,
35 'status': self.status,
36 'result': self.result,
37 'traceback': self.traceback,
38 'date_done': self.date_done,
39 }
40
41 def __repr__(self):
42 return '<Task {0.task_id} state: {0.status}>'.format(self)
43
44 @classmethod
45 def configure(cls, schema=None, name=None):
46 cls.__table__.schema = schema
47 cls.id.default.schema = schema
48 cls.__table__.name = name or cls.__tablename__
49
50
51 class TaskExtended(Task):
52 """For the extend result."""
53
54 __tablename__ = 'celery_taskmeta'
55 __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}
56
57 name = sa.Column(sa.String(155), nullable=True)
58 args = sa.Column(sa.LargeBinary, nullable=True)
59 kwargs = sa.Column(sa.LargeBinary, nullable=True)
60 worker = sa.Column(sa.String(155), nullable=True)
61 retries = sa.Column(sa.Integer, nullable=True)
62 queue = sa.Column(sa.String(155), nullable=True)
63
64 def to_dict(self):
65 task_dict = super().to_dict()
66 task_dict.update({
67 'name': self.name,
68 'args': self.args,
69 'kwargs': self.kwargs,
70 'worker': self.worker,
71 'retries': self.retries,
72 'queue': self.queue,
73 })
74 return task_dict
75
76
77 class TaskSet(ResultModelBase):
78 """TaskSet result."""
79
80 __tablename__ = 'celery_tasksetmeta'
81 __table_args__ = {'sqlite_autoincrement': True}
82
83 id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
84 autoincrement=True, primary_key=True)
85 taskset_id = sa.Column(sa.String(155), unique=True)
86 result = sa.Column(PickleType, nullable=True)
87 date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
88 nullable=True)
89
90 def __init__(self, taskset_id, result):
91 self.taskset_id = taskset_id
92 self.result = result
93
94 def to_dict(self):
95 return {
96 'taskset_id': self.taskset_id,
97 'result': self.result,
98 'date_done': self.date_done,
99 }
100
101 def __repr__(self):
102 return f'<TaskSet: {self.taskset_id}>'
103
104 @classmethod
105 def configure(cls, schema=None, name=None):
106 cls.__table__.schema = schema
107 cls.id.default.schema = schema
108 cls.__table__.name = name or cls.__tablename__
109
[end of celery/backends/database/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py
--- a/celery/backends/database/models.py
+++ b/celery/backends/database/models.py
@@ -25,6 +25,7 @@
date_done = sa.Column(sa.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow, nullable=True)
traceback = sa.Column(sa.Text, nullable=True)
+ children = sa.Column(PickleType, nullable=True)
def __init__(self, task_id):
self.task_id = task_id
@@ -36,6 +37,7 @@
'result': self.result,
'traceback': self.traceback,
'date_done': self.date_done,
+ 'children': self.children,
}
def __repr__(self):
| {"golden_diff": "diff --git a/celery/backends/database/models.py b/celery/backends/database/models.py\n--- a/celery/backends/database/models.py\n+++ b/celery/backends/database/models.py\n@@ -25,6 +25,7 @@\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n+ children = sa.Column(PickleType, nullable=True)\n \n def __init__(self, task_id):\n self.task_id = task_id\n@@ -36,6 +37,7 @@\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n+ 'children': self.children,\n }\n \n def __repr__(self):\n", "issue": "database backend does not store children\nThe [`Task`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L20-L27) and [`TaskExtended`](https://github.com/celery/celery/blob/main/celery/backends/database/models.py#L57-L62) models for the `database` backend do not include `children`. This means that when using any `database` backend, [`AsyncResult.children`](https://github.com/celery/celery/blob/main/celery/result.py#L424) is always empty, even if a task does have children.\n", "before_files": [{"content": "\"\"\"Database models used by the SQLAlchemy result store backend.\"\"\"\nfrom datetime import datetime\n\nimport sqlalchemy as sa\nfrom sqlalchemy.types import PickleType\n\nfrom celery import states\n\nfrom .session import ResultModelBase\n\n__all__ = ('Task', 'TaskExtended', 'TaskSet')\n\n\nclass Task(ResultModelBase):\n \"\"\"Task result/status.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),\n primary_key=True, autoincrement=True)\n task_id = sa.Column(sa.String(155), unique=True)\n status = sa.Column(sa.String(50), default=states.PENDING)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n onupdate=datetime.utcnow, nullable=True)\n traceback = sa.Column(sa.Text, nullable=True)\n\n def __init__(self, task_id):\n self.task_id = task_id\n\n def to_dict(self):\n return {\n 'task_id': self.task_id,\n 'status': self.status,\n 'result': self.result,\n 'traceback': self.traceback,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return '<Task {0.task_id} state: {0.status}>'.format(self)\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n\n\nclass TaskExtended(Task):\n \"\"\"For the extend result.\"\"\"\n\n __tablename__ = 'celery_taskmeta'\n __table_args__ = {'sqlite_autoincrement': True, 'extend_existing': True}\n\n name = sa.Column(sa.String(155), nullable=True)\n args = sa.Column(sa.LargeBinary, nullable=True)\n kwargs = sa.Column(sa.LargeBinary, nullable=True)\n worker = sa.Column(sa.String(155), nullable=True)\n retries = sa.Column(sa.Integer, nullable=True)\n queue = sa.Column(sa.String(155), nullable=True)\n\n def to_dict(self):\n task_dict = super().to_dict()\n task_dict.update({\n 'name': self.name,\n 'args': self.args,\n 'kwargs': self.kwargs,\n 'worker': self.worker,\n 'retries': self.retries,\n 'queue': self.queue,\n })\n return task_dict\n\n\nclass TaskSet(ResultModelBase):\n \"\"\"TaskSet result.\"\"\"\n\n __tablename__ = 'celery_tasksetmeta'\n __table_args__ = {'sqlite_autoincrement': True}\n\n id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),\n autoincrement=True, primary_key=True)\n taskset_id 
= sa.Column(sa.String(155), unique=True)\n result = sa.Column(PickleType, nullable=True)\n date_done = sa.Column(sa.DateTime, default=datetime.utcnow,\n nullable=True)\n\n def __init__(self, taskset_id, result):\n self.taskset_id = taskset_id\n self.result = result\n\n def to_dict(self):\n return {\n 'taskset_id': self.taskset_id,\n 'result': self.result,\n 'date_done': self.date_done,\n }\n\n def __repr__(self):\n return f'<TaskSet: {self.taskset_id}>'\n\n @classmethod\n def configure(cls, schema=None, name=None):\n cls.__table__.schema = schema\n cls.id.default.schema = schema\n cls.__table__.name = name or cls.__tablename__\n", "path": "celery/backends/database/models.py"}]} | 1,687 | 176 |
gh_patches_debug_19000 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI Instrumentor not populating attributes required for Azure exporter
**Describe your environment**
**Package Version** - azure-monitor-opentelemetry-exporter = {version = "^1.0.0-beta.3", allow-prereleases = true},
opentelemetry-instrumentation-fastapi = "^0.19b0"
**Operating System** - macOS Mojave v10.14.6
**Python Version** - v3.9.0
**Steps to reproduce**
When using the azure-monitor-opentelemetry-exporter in conjunction with opentelemetry-instrumentation-fastapi
the response code is not populated by the exporter, raised this issue with Azure monitor team and was redirected here;
> Looking at the output, it seems like the spans are missing some attributes. This is probably an issue in the opentelemetry-instrumentation-asgi, could you file an issue here for this problem specifically? Basically, the exporter sees if http.status_code is populated in the span attributes to populate response_Code in app insights. Since the attribute is not there, this is why you are seeing 0s. Normally, it is up to the instrumentation to populate this.
[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)
Spans obtained from console exporter:
- one for successful - 201 request
- one for failed - 422 request
- one on exit
[json.zip](https://github.com/open-telemetry/opentelemetry-python-contrib/files/6281056/json.zip)
Screen grab from Azure App Insights indicating the response code as 0:

**What is the expected behavior?**
Compatibility with Azure monitor exporter
**Additional context**
Detailed context for this issue
[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)
Please let me know if additional info is required.
Thanks.
</issue>
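
Per the quoted Azure response, the exporter reads `http.status_code` off the server span, and in ASGI the status only becomes known when the application emits the `http.response.start` event. The sketch below illustrates the general approach; the `wrap_send` helper is invented for illustration and is not the instrumentation's actual API.

```python
from opentelemetry.semconv.trace import SpanAttributes


def wrap_send(server_span, send):
    """Copy the HTTP status from the ASGI response event onto the server span."""

    async def otel_send(message):
        if message.get("type") == "http.response.start" and server_span.is_recording():
            # "status" is the integer HTTP status code per the ASGI spec.
            server_span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, message["status"])
        await send(message)

    return otel_send
```
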
<code>
[start of instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used
17 on any ASGI framework (such as Django-channels / Quart) to track requests
18 timing through OpenTelemetry.
19 """
20
21 import typing
22 import urllib
23 from functools import wraps
24 from typing import Tuple
25
26 from asgiref.compatibility import guarantee_single_callable
27
28 from opentelemetry import context, trace
29 from opentelemetry.instrumentation.asgi.version import __version__ # noqa
30 from opentelemetry.instrumentation.utils import http_status_to_status_code
31 from opentelemetry.propagate import extract
32 from opentelemetry.propagators.textmap import Getter
33 from opentelemetry.semconv.trace import SpanAttributes
34 from opentelemetry.trace.status import Status, StatusCode
35
36
37 class ASGIGetter(Getter):
38 def get(
39 self, carrier: dict, key: str
40 ) -> typing.Optional[typing.List[str]]:
41 """Getter implementation to retrieve a HTTP header value from the ASGI
42 scope.
43
44 Args:
45 carrier: ASGI scope object
46 key: header name in scope
47 Returns:
48 A list with a single string with the header value if it exists,
49 else None.
50 """
51 headers = carrier.get("headers")
52 if not headers:
53 return None
54
55 # asgi header keys are in lower case
56 key = key.lower()
57 decoded = [
58 _value.decode("utf8")
59 for (_key, _value) in headers
60 if _key.decode("utf8") == key
61 ]
62 if not decoded:
63 return None
64 return decoded
65
66 def keys(self, carrier: dict) -> typing.List[str]:
67 return list(carrier.keys())
68
69
70 asgi_getter = ASGIGetter()
71
72
73 def collect_request_attributes(scope):
74 """Collects HTTP request attributes from the ASGI scope and returns a
75 dictionary to be used as span creation attributes."""
76 server_host, port, http_url = get_host_port_url_tuple(scope)
77 query_string = scope.get("query_string")
78 if query_string and http_url:
79 if isinstance(query_string, bytes):
80 query_string = query_string.decode("utf8")
81 http_url = http_url + ("?" + urllib.parse.unquote(query_string))
82
83 result = {
84 SpanAttributes.HTTP_SCHEME: scope.get("scheme"),
85 SpanAttributes.HTTP_HOST: server_host,
86 SpanAttributes.NET_HOST_PORT: port,
87 SpanAttributes.HTTP_FLAVOR: scope.get("http_version"),
88 SpanAttributes.HTTP_TARGET: scope.get("path"),
89 SpanAttributes.HTTP_URL: http_url,
90 }
91 http_method = scope.get("method")
92 if http_method:
93 result[SpanAttributes.HTTP_METHOD] = http_method
94
95 http_host_value_list = asgi_getter.get(scope, "host")
96 if http_host_value_list:
97 result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
98 http_host_value_list
99 )
100 http_user_agent = asgi_getter.get(scope, "user-agent")
101 if http_user_agent:
102 result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]
103
104 if "client" in scope and scope["client"] is not None:
105 result[SpanAttributes.NET_PEER_IP] = scope.get("client")[0]
106 result[SpanAttributes.NET_PEER_PORT] = scope.get("client")[1]
107
108 # remove None values
109 result = {k: v for k, v in result.items() if v is not None}
110
111 return result
112
113
114 def get_host_port_url_tuple(scope):
115 """Returns (host, port, full_url) tuple.
116 """
117 server = scope.get("server") or ["0.0.0.0", 80]
118 port = server[1]
119 server_host = server[0] + (":" + str(port) if port != 80 else "")
120 full_path = scope.get("root_path", "") + scope.get("path", "")
121 http_url = scope.get("scheme", "http") + "://" + server_host + full_path
122 return server_host, port, http_url
123
124
125 def set_status_code(span, status_code):
126 """Adds HTTP response attributes to span using the status_code argument."""
127 if not span.is_recording():
128 return
129 try:
130 status_code = int(status_code)
131 except ValueError:
132 span.set_status(
133 Status(
134 StatusCode.ERROR,
135 "Non-integer HTTP status: " + repr(status_code),
136 )
137 )
138 else:
139 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
140 span.set_status(Status(http_status_to_status_code(status_code)))
141
142
143 def get_default_span_details(scope: dict) -> Tuple[str, dict]:
144 """Default implementation for span_details_callback
145
146 Args:
147 scope: the asgi scope dictionary
148
149 Returns:
150 a tuple of the span, and any attributes to attach to the
151 span.
152 """
153 method_or_path = scope.get("method") or scope.get("path")
154
155 return method_or_path, {}
156
157
158 class OpenTelemetryMiddleware:
159 """The ASGI application middleware.
160
161 This class is an ASGI middleware that starts and annotates spans for any
162 requests it is invoked with.
163
164 Args:
165 app: The ASGI application callable to forward requests to.
166 span_details_callback: Callback which should return a string
167 and a tuple, representing the desired span name and a
168 dictionary with any additional span attributes to set.
169 Optional: Defaults to get_default_span_details.
170 tracer_provider: The optional tracer provider to use. If omitted
171 the current globally configured one is used.
172 """
173
174 def __init__(
175 self,
176 app,
177 excluded_urls=None,
178 span_details_callback=None,
179 tracer_provider=None,
180 ):
181 self.app = guarantee_single_callable(app)
182 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)
183 self.span_details_callback = (
184 span_details_callback or get_default_span_details
185 )
186 self.excluded_urls = excluded_urls
187
188 async def __call__(self, scope, receive, send):
189 """The ASGI application
190
191 Args:
192 scope: A ASGI environment.
193 receive: An awaitable callable yielding dictionaries
194 send: An awaitable callable taking a single dictionary as argument.
195 """
196 if scope["type"] not in ("http", "websocket"):
197 return await self.app(scope, receive, send)
198
199 _, _, url = get_host_port_url_tuple(scope)
200 if self.excluded_urls and self.excluded_urls.url_disabled(url):
201 return await self.app(scope, receive, send)
202
203 token = context.attach(extract(scope, getter=asgi_getter))
204 span_name, additional_attributes = self.span_details_callback(scope)
205
206 try:
207 with self.tracer.start_as_current_span(
208 span_name + " asgi", kind=trace.SpanKind.SERVER,
209 ) as span:
210 if span.is_recording():
211 attributes = collect_request_attributes(scope)
212 attributes.update(additional_attributes)
213 for key, value in attributes.items():
214 span.set_attribute(key, value)
215
216 @wraps(receive)
217 async def wrapped_receive():
218 with self.tracer.start_as_current_span(
219 span_name + " asgi." + scope["type"] + ".receive"
220 ) as receive_span:
221 message = await receive()
222 if receive_span.is_recording():
223 if message["type"] == "websocket.receive":
224 set_status_code(receive_span, 200)
225 receive_span.set_attribute("type", message["type"])
226 return message
227
228 @wraps(send)
229 async def wrapped_send(message):
230 with self.tracer.start_as_current_span(
231 span_name + " asgi." + scope["type"] + ".send"
232 ) as send_span:
233 if send_span.is_recording():
234 if message["type"] == "http.response.start":
235 status_code = message["status"]
236 set_status_code(send_span, status_code)
237 elif message["type"] == "websocket.send":
238 set_status_code(send_span, 200)
239 send_span.set_attribute("type", message["type"])
240 await send(message)
241
242 await self.app(scope, wrapped_receive, wrapped_send)
243 finally:
244 context.detach(token)
245
[end of instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
@@ -233,8 +233,10 @@
if send_span.is_recording():
if message["type"] == "http.response.start":
status_code = message["status"]
+ set_status_code(span, status_code)
set_status_code(send_span, status_code)
elif message["type"] == "websocket.send":
+ set_status_code(span, 200)
set_status_code(send_span, 200)
send_span.set_attribute("type", message["type"])
await send(message)
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n@@ -233,8 +233,10 @@\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n+ set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n+ set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n", "issue": "FastAPI Instrumentor not populating attributes required for Azure exporter\n\r\n\r\n**Describe your environment** \r\n\r\n**Package Version - azure-monitor-opentelemetry-exporter = {version = \"^1.0.0-beta.3\", allow-prereleases = true}\r\nopentelemetry-instrumentation-fastapi = \"^0.19b0\"\r\nOperating System - macOS Mojave v10.14.6**\r\n**Python Version - v3.9.0**\r\n\r\n**Steps to reproduce**\r\nWhen using the azure-monitor-opentelemetry-exporter in conjunction with opentelemetry-instrumentation-fastapi \r\nthe response code is not populated by the exporter, raised this issue with Azure monitor team and was redirected here;\r\n\r\n> Looking at the output, it seems like the spans are missing some attributes. This is probably an issue in the opentelemetry-instrumentation-asgi, could you file an issue here for this problem specifically? Basically, the exporter sees if http.status_code is populated in the span attributes to populate response_Code in app insights. Since the attribute is not there, this is why you are seeing 0s. 
Normally, it is up to the instrumentation to populate this.\r\n\r\n[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)\r\n\r\nSpans obtained from console exporter: \r\n\r\n- one for successful - 201 request\r\n- one for failed - 422 request\r\n- one on exit\r\n\r\n[json.zip](https://github.com/open-telemetry/opentelemetry-python-contrib/files/6281056/json.zip)\r\n\r\nScreen grab from Azure App Insights indication reponse code as 0:\r\n\r\n\r\n\r\n**What is the expected behavior?**\r\nCompatibility with Azure monitor exporter\r\n\r\n**Additional context**\r\nDetailed context for this issue\r\n\r\n[Azure monitor issue#17839](https://github.com/Azure/azure-sdk-for-python/issues/17839)\r\n\r\nPlease let me know in case of additional info required.\r\n\r\nThanks.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: 
server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: http_url,\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\n \"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if port != 80 else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for span_details_callback\n\n Args:\n scope: the asgi scope dictionary\n\n Returns:\n a tuple of the span, and any attributes to attach to the\n span.\n \"\"\"\n method_or_path = scope.get(\"method\") or scope.get(\"path\")\n\n return method_or_path, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n span_details_callback: Callback which should return a string\n and a tuple, representing the desired span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n tracer_provider: The optional tracer provider to use. 
If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n span_details_callback=None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.span_details_callback = (\n span_details_callback or get_default_span_details\n )\n self.excluded_urls = excluded_urls\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.span_details_callback(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name + \" asgi\", kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".receive\"\n ) as receive_span:\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n span_name + \" asgi.\" + scope[\"type\"] + \".send\"\n ) as send_span:\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}]} | 3,559 | 223 |
gh_patches_debug_2226 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5568 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support matplotlib-3.4.0rc1
## Description of the issue
`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.
This is the scheduled removal of deprecations introduced around the time of #5006. The missing function leads to a traceback during the build and to the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.
## Context information (for bug reports)
* Output of `pyinstaller --version`: ```4.2```
* Version of Python: `3.8`
* Platform: `Windows`
* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`
> * try the latest development version, using the following command:
>
> ```shell
> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip
> ```
>
> * follow *all* the instructions in our "If Things Go Wrong" Guide
> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and
>
> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)
>
> * [ ] start with clean installation
> * [ ] use the latest development version
> * [ ] Run your frozen program **from a command window (shell)** — instead of double-clicking on it
> * [ ] Package your program in **--onedir mode**
> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file
> * [ ] Repackage your application in **verbose/debug mode**. For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.
>
This part of the template is irrelevant, as the responsible code is unchanged on `develop`
### A minimal example program which shows the error
```
pip install --pre matplotlib==3.4.0rc1 pyinstaller
echo "import matplotlib" > test.py
pyinstaller test.py
```
### Stacktrace / full error message
Building `pyinstaller test.py`:
```
20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...
Traceback (most recent call last):
File "<string>", line 1, in <module>
AttributeError: module 'matplotlib' has no attribute '_get_data_path'
```
Running `test.exe`:
```
Traceback (most recent call last):
File "test.py", line 1, in <module>
import matplotlib
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "PyInstaller\loader\pyimod03_importers.py", line 531, in exec_module
File "matplotlib\__init__.py", line 820, in <module>
File "matplotlib\__init__.py", line 725, in _rc_params_in_file
File "contextlib.py", line 113, in __enter__
File "matplotlib\__init__.py", line 703, in _open_file_or_url
FileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\matplotlib\\mpl-data\\matplotlibrc'
[2688] Failed to execute script test
```
## Possible resolution
Simply remove the first underscore in `matplotlib._get_data_path` in
https://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16
This works on my system.
I'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.
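A sketch of the hook with that one-character change applied (the structure mirrors `hook-matplotlib.py` as shown below; only the call to the public accessor differs):

```
from PyInstaller.utils.hooks import exec_statement

# Use the public accessor; the private `_get_data_path` alias is gone in 3.4.0rc1.
mpl_data_dir = exec_statement(
    "import matplotlib; print(matplotlib.get_data_path())")

datas = [
    (mpl_data_dir, "matplotlib/mpl-data"),
]
```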
</issue>
<code>
[start of PyInstaller/hooks/hook-matplotlib.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2021, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12
13 from PyInstaller.utils.hooks import exec_statement
14
15 mpl_data_dir = exec_statement(
16 "import matplotlib; print(matplotlib._get_data_path())")
17
18 datas = [
19 (mpl_data_dir, "matplotlib/mpl-data"),
20 ]
21
[end of PyInstaller/hooks/hook-matplotlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py
--- a/PyInstaller/hooks/hook-matplotlib.py
+++ b/PyInstaller/hooks/hook-matplotlib.py
@@ -13,7 +13,7 @@
from PyInstaller.utils.hooks import exec_statement
mpl_data_dir = exec_statement(
- "import matplotlib; print(matplotlib._get_data_path())")
+ "import matplotlib; print(matplotlib.get_data_path())")
datas = [
(mpl_data_dir, "matplotlib/mpl-data"),
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.py b/PyInstaller/hooks/hook-matplotlib.py\n--- a/PyInstaller/hooks/hook-matplotlib.py\n+++ b/PyInstaller/hooks/hook-matplotlib.py\n@@ -13,7 +13,7 @@\n from PyInstaller.utils.hooks import exec_statement\n \n mpl_data_dir = exec_statement(\n- \"import matplotlib; print(matplotlib._get_data_path())\")\n+ \"import matplotlib; print(matplotlib.get_data_path())\")\n \n datas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n", "issue": "Support matplotlib-3.4.0rc1\n## Description of the issue\r\n`matplotlib._get_data_path` no longer exists since 3.4.0rc1: https://github.com/matplotlib/matplotlib/commit/e1352c71f07aee7eab004b73dd9bda2a260ab31b.\r\nThis is on schedule for the removal of the deprecations that occurred around the time of #5006. The missing function leads to a traceback output during build, and the whole `cwd` being crammed into `mpl-data`. Finally, `matplotlib` cannot be imported in the packaged app because it cannot find `mpl-data/matplotlibrc`.\r\n## Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```4.2```\r\n* Version of Python: `3.8`\r\n* Platform: `Windows`\r\n* Did you also try this on another platform? Does it work there? `Surely it is a cross platform bug`\r\n\r\n\r\n> * try the latest development version, using the following command: \r\n> \r\n> ```shell\r\n> pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip\r\n> ```\r\n> \r\n> * follow *all* the instructions in our \"If Things Go Wrong\" Guide\r\n> (https://github.com/pyinstaller/pyinstaller/wiki/If-Things-Go-Wrong) and\r\n> \r\n> ### Make sure [everything is packaged correctly](https://github.com/pyinstaller/pyinstaller/wiki/How-to-Report-Bugs#make-sure-everything-is-packaged-correctly)\r\n> \r\n> * [ ] start with clean installation\r\n> * [ ] use the latest development version\r\n> * [ ] Run your frozen program **from a command window (shell)** \u2014 instead of double-clicking on it\r\n> * [ ] Package your program in **--onedir mode**\r\n> * [ ] Package **without UPX**, say: use the option `--noupx` or set `upx=False` in your .spec-file\r\n> * [ ] Repackage you application in **verbose/debug mode**. 
For this, pass the option `--debug` to `pyi-makespec` or `pyinstaller` or use `EXE(..., debug=1, ...)` in your .spec file.\r\n> \r\nThis part of the template is irrelevant, as the responsible code is unchanged on `develop`\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\npip install --pre matplotlib==3.4.0rc1 pyinstaller\r\necho \"import matplotlib\" > test.py\r\npyinstaller test.py\r\n```\r\n\r\n### Stacktrace / full error message\r\nBuilding `pyinstaller test.py`:\r\n```\r\n20391 INFO: Loading module hook 'hook-matplotlib.py' from 'XXXXXXX'...\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nAttributeError: module 'matplotlib' has no attribute '_get_data_path'\r\n\r\n```\r\nRunning `test.exe`:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 1, in <module>\r\n import matplotlib\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"PyInstaller\\loader\\pyimod03_importers.py\", line 531, in exec_module\r\n File \"matplotlib\\__init__.py\", line 820, in <module>\r\n File \"matplotlib\\__init__.py\", line 725, in _rc_params_in_file\r\n File \"contextlib.py\", line 113, in __enter__\r\n File \"matplotlib\\__init__.py\", line 703, in _open_file_or_url\r\nFileNotFoundError: [Errno 2] No such file or directory: 'xxxxx\\\\matplotlib\\\\mpl-data\\\\matplotlibrc'\r\n[2688] Failed to execute script test\r\n\r\n```\r\n\r\n## Possible resolution\r\n\r\nSimply remove the first underscore in `matplotlib._get_data_path` in \r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/b9fcbbf86bc71addafc830debe289e7edb2a5697/PyInstaller/hooks/hook-matplotlib.py#L16\r\n\r\nThis works on my system.\r\n\r\nI'm a little confused as to why the private function was being used in the first place. `matplotlib.get_data_path` has been available for some time.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\n\nfrom PyInstaller.utils.hooks import exec_statement\n\nmpl_data_dir = exec_statement(\n \"import matplotlib; print(matplotlib._get_data_path())\")\n\ndatas = [\n (mpl_data_dir, \"matplotlib/mpl-data\"),\n]\n", "path": "PyInstaller/hooks/hook-matplotlib.py"}]} | 1,729 | 126 |
gh_patches_debug_22927 | rasdani/github-patches | git_diff | beeware__toga-1193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageView only works with square images
I created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)
My Python version is 3.8.5.
When trying to add images, some work and some do not.
This code doesn't work:
`image_from_url = toga.Image("https://dummyimage.com/100x67/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`
And this is working fine:
`image_from_url = toga.Image("https://dummyimage.com/100x100/000/fff")
imageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`
The error I get is:
> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed
> ... TypeError: Argument 0 does not allow None as a value
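The assertion fires because `gdk_pixbuf_scale_simple` refuses a zero target dimension: on the first draw the widget's allocation is only 1x1, and scaling a non-square image into that rounds one side down to 0. A sketch of a guard in the aspect-ratio helper (the helper itself appears in the GTK backend code below) that keeps both dimensions at least 1:

```
def _resize_max(original_height, original_width, max_height, max_width):
    # Guard against invalid inputs, as the backend below already does.
    if min(original_height, original_width, max_height, max_width) <= 0:
        return 1, 1

    width_ratio = max_width / original_width
    height_ratio = max_height / original_height

    height = original_height * width_ratio
    if height <= max_height:
        width = original_width * width_ratio
    else:
        height = original_height * height_ratio
        width = original_width * height_ratio

    # Never let either dimension round down to 0, otherwise
    # gdk_pixbuf_scale_simple asserts as in the traceback above.
    return max(int(height), 1), max(int(width), 1)
```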
</issue>
<code>
[start of src/gtk/toga_gtk/widgets/imageview.py]
1 from ..libs import GdkPixbuf, Gtk, Gdk
2 from .base import Widget
3
4
5 class ImageView(Widget):
6 def create(self):
7 self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
8 self._image = Gtk.Image()
9 self._pixbuf = None
10 self.native.add(self._image)
11 self.native.interface = self.interface
12
13 def set_image(self, image):
14 self._pixbuf = image._impl.native
15
16 def set_bounds(self, x, y, width, height):
17 super().set_bounds(x, y, width, height)
18 # rehint to update scaling of pixbuf
19 self.rehint()
20
21 def rehint(self):
22 if self._pixbuf:
23 height, width = self._resize_max(
24 original_height=self._pixbuf.get_height(),
25 original_width=self._pixbuf.get_width(),
26 max_height=self.native.get_allocated_height(),
27 max_width=self.native.get_allocated_width(),
28 )
29
30 dpr = self.native.get_scale_factor()
31
32 scaled_pixbuf = self._pixbuf.scale_simple(
33 width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
34 )
35
36 surface = Gdk.cairo_surface_create_from_pixbuf(
37 scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window
38 )
39 self._image.set_from_surface(surface)
40
41 @staticmethod
42 def _resize_max(original_height, original_width, max_height, max_width):
43
44 # Check to make sure all dimensions have valid sizes
45 if min(original_height, original_width, max_height, max_width) <= 0:
46 return 1, 1
47
48 width_ratio = max_width / original_width
49 height_ratio = max_height / original_height
50
51 height = original_height * width_ratio
52 if height <= max_height:
53 width = original_width * width_ratio
54 else:
55 height = original_height * height_ratio
56 width = original_width * height_ratio
57
58 return int(height), int(width)
59
[end of src/gtk/toga_gtk/widgets/imageview.py]
[start of examples/imageview/imageview/app.py]
1 import toga
2 from toga.style.pack import CENTER, COLUMN
3
4
5 class ImageViewApp(toga.App):
6 def startup(self):
7 self.main_window = toga.MainWindow(title=self.name)
8
9 box = toga.Box()
10 box.style.padding = 40
11 box.style.update(alignment=CENTER)
12 box.style.update(direction=COLUMN)
13
14 # image from local path
15 # load brutus.png from the package
16 # We set the style width/height parameters for this one
17 image_from_path = toga.Image('resources/brutus.png')
18 imageview_from_path = toga.ImageView(image_from_path)
19 imageview_from_path.style.update(height=72)
20 imageview_from_path.style.update(width=72)
21 box.add(imageview_from_path)
22
23 # image from remote URL
24 # no style parameters - we let Pack determine how to allocate
25 # the space
26 image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')
27 imageview_from_url = toga.ImageView(image_from_url)
28 box.add(imageview_from_url)
29
30 self.main_window.content = box
31 self.main_window.show()
32
33
34 def main():
35 return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')
36
37
38 if __name__ == '__main__':
39 app = main()
40 app.main_loop()
41
[end of examples/imageview/imageview/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py
--- a/examples/imageview/imageview/app.py
+++ b/examples/imageview/imageview/app.py
@@ -14,10 +14,9 @@
# image from local path
# load brutus.png from the package
# We set the style width/height parameters for this one
- image_from_path = toga.Image('resources/brutus.png')
+ image_from_path = toga.Image('resources/pride-brutus.png')
imageview_from_path = toga.ImageView(image_from_path)
imageview_from_path.style.update(height=72)
- imageview_from_path.style.update(width=72)
box.add(imageview_from_path)
# image from remote URL
diff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py
--- a/src/gtk/toga_gtk/widgets/imageview.py
+++ b/src/gtk/toga_gtk/widgets/imageview.py
@@ -55,4 +55,8 @@
height = original_height * height_ratio
width = original_width * height_ratio
- return int(height), int(width)
+ # On the first display the allocated height/width will be 1x1.
+ # If the image isn't square, this will result in one of the dimensions
+ # scaling to 0, which breaks GTK. So; constraint the minimum height
+ # and width to 1.
+ return max(int(height), 1), max(int(width), 1)
| {"golden_diff": "diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py\n--- a/examples/imageview/imageview/app.py\n+++ b/examples/imageview/imageview/app.py\n@@ -14,10 +14,9 @@\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n- image_from_path = toga.Image('resources/brutus.png')\n+ image_from_path = toga.Image('resources/pride-brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n- imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n \n # image from remote URL\ndiff --git a/src/gtk/toga_gtk/widgets/imageview.py b/src/gtk/toga_gtk/widgets/imageview.py\n--- a/src/gtk/toga_gtk/widgets/imageview.py\n+++ b/src/gtk/toga_gtk/widgets/imageview.py\n@@ -55,4 +55,8 @@\n height = original_height * height_ratio\n width = original_width * height_ratio\n \n- return int(height), int(width)\n+ # On the first display the allocated height/width will be 1x1.\n+ # If the image isn't square, this will result in one of the dimensions\n+ # scaling to 0, which breaks GTK. So; constraint the minimum height\n+ # and width to 1.\n+ return max(int(height), 1), max(int(width), 1)\n", "issue": "ImageView only works with square images\nI created a hello world app following this tutorial under Linux Mint 20: [https://docs.beeware.org/en/latest/tutorial/tutorial-0.html](https://docs.beeware.org/en/latest/tutorial/tutorial-0.html)\r\nMy python version is 3.8.5\r\nWhen trying to add images some work and some not.\r\n\r\nThis code doesn't work:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x67/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=67))`\r\n\r\nAnd this is working fine:\r\n`image_from_url = toga.Image(\"https://dummyimage.com/100x100/000/fff\")\r\nimageview_from_url = toga.ImageView(image=image_from_url, style=Pack(width=100, height=100))`\r\n\r\nThe error I get is:\r\n\r\n> (__main__.py:67130): GdkPixbuf-CRITICAL **: 16:12:00.644: gdk_pixbuf_scale_simple: assertion 'dest_height > 0' failed\r\n> ... 
TypeError: Argument 0 does not allow None as a value\r\n\n", "before_files": [{"content": "from ..libs import GdkPixbuf, Gtk, Gdk\nfrom .base import Widget\n\n\nclass ImageView(Widget):\n def create(self):\n self.native = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self._image = Gtk.Image()\n self._pixbuf = None\n self.native.add(self._image)\n self.native.interface = self.interface\n\n def set_image(self, image):\n self._pixbuf = image._impl.native\n\n def set_bounds(self, x, y, width, height):\n super().set_bounds(x, y, width, height)\n # rehint to update scaling of pixbuf\n self.rehint()\n\n def rehint(self):\n if self._pixbuf:\n height, width = self._resize_max(\n original_height=self._pixbuf.get_height(),\n original_width=self._pixbuf.get_width(),\n max_height=self.native.get_allocated_height(),\n max_width=self.native.get_allocated_width(),\n )\n\n dpr = self.native.get_scale_factor()\n\n scaled_pixbuf = self._pixbuf.scale_simple(\n width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR\n )\n\n surface = Gdk.cairo_surface_create_from_pixbuf(\n scaled_pixbuf, 0, self.native.get_window() # scale: 0 = same as window\n )\n self._image.set_from_surface(surface)\n\n @staticmethod\n def _resize_max(original_height, original_width, max_height, max_width):\n\n # Check to make sure all dimensions have valid sizes\n if min(original_height, original_width, max_height, max_width) <= 0:\n return 1, 1\n\n width_ratio = max_width / original_width\n height_ratio = max_height / original_height\n\n height = original_height * width_ratio\n if height <= max_height:\n width = original_width * width_ratio\n else:\n height = original_height * height_ratio\n width = original_width * height_ratio\n\n return int(height), int(width)\n", "path": "src/gtk/toga_gtk/widgets/imageview.py"}, {"content": "import toga\nfrom toga.style.pack import CENTER, COLUMN\n\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n\n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://beeware.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return ImageViewApp('ImageView', 'org.beeware.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}]} | 1,757 | 351 |
gh_patches_debug_32054 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't allow public schema to be edited
We currently allow editing the public schema's name and description from the schema page. We should disable this.
<img width="968" alt="Screen Shot 2022-11-18 at 12 03 58 PM" src="https://user-images.githubusercontent.com/287034/202761131-265da99a-6f76-4f02-b680-7a98851427b3.png">
</issue>
<code>
[start of mathesar/api/exceptions/validation_exceptions/exceptions.py]
1 from mathesar.api.exceptions.error_codes import ErrorCodes
2 from mathesar.api.exceptions.validation_exceptions.base_exceptions import MathesarValidationException
3
4
5 class DistinctColumnRequiredAPIException(MathesarValidationException):
6 error_code = ErrorCodes.DistinctColumnNameRequired.value
7
8 def __init__(
9 self,
10 message="Column names must be distinct",
11 field=None,
12 details=None,
13 ):
14 super().__init__(None, self.error_code, message, field, details)
15
16
17 class ColumnSizeMismatchAPIException(MathesarValidationException):
18 error_code = ErrorCodes.ColumnSizeMismatch.value
19
20 def __init__(
21 self,
22 message="Incorrect number of columns in request.",
23 field=None,
24 details=None,
25 ):
26 super().__init__(None, self.error_code, message, field, details)
27
28
29 class InvalidLinkChoiceAPIException(MathesarValidationException):
30 error_code = ErrorCodes.InvalidLinkChoice.value
31
32 def __init__(
33 self,
34 message="Invalid Link type",
35 field=None,
36 details=None,
37 ):
38 super().__init__(None, self.error_code, message, field, details)
39
40
41 class MultipleDataFileAPIException(MathesarValidationException):
42 error_code = ErrorCodes.MultipleDataFiles.value
43
44 def __init__(
45 self,
46 message="Multiple data files are unsupported.",
47 field=None,
48 details=None,
49 ):
50 super().__init__(None, self.error_code, message, field, details)
51
52
53 class UnknownDatabaseTypeIdentifier(MathesarValidationException):
54 error_code = ErrorCodes.UnknownDBType.value
55
56 def __init__(
57 self,
58 db_type_id,
59 field=None,
60 details=None,
61 ):
62 message = f"Unknown database type identifier {db_type_id}."
63 super().__init__(None, self.error_code, message, field, details)
64
65
66 class MoneyDisplayOptionValueConflictAPIException(MathesarValidationException):
67 error_code = ErrorCodes.MoneyDisplayOptionConflict.value
68
69 def __init__(
70 self,
71 message="Money type cannot specify a currency code display option as well as other display options.",
72 field=None,
73 details=None,
74 ):
75 super().__init__(None, self.error_code, message, field, details)
76
77
78 class IncompatibleFractionDigitValuesAPIException(MathesarValidationException):
79 error_code = ErrorCodes.IncompatibleFractionDigitValues.value
80
81 def __init__(
82 self,
83 message="maximum_fraction_digits cannot be less than minimum_fraction_digits.",
84 field=None,
85 details=None,
86 ):
87 super().__init__(None, self.error_code, message, field, details)
88
89
90 class UnsupportedConstraintAPIException(MathesarValidationException):
91 error_code = ErrorCodes.UnsupportedConstraint.value
92
93 def __init__(
94 self,
95 constraint_type,
96 field=None,
97 ):
98 message = f"Operations related to {constraint_type} constraint are currently not supported"
99 super().__init__(None, self.error_code, message, field, None)
100
101
102 class ConstraintColumnEmptyAPIException(MathesarValidationException):
103 error_code = ErrorCodes.ConstraintColumnEmpty.value
104
105 def __init__(
106 self,
107 field=None,
108 ):
109 message = "Constraint column field cannot be empty"
110 super().__init__(None, self.error_code, message, field, None)
111
112
113 class InvalidValueType(MathesarValidationException):
114 error_code = ErrorCodes.InvalidValueType.value
115
116 def __init__(
117 self,
118 message=None,
119 field=None,
120 ):
121 if message is None:
122 message = "Value's type is invalid."
123 super().__init__(None, self.error_code, message, field, None)
124
125
126 class DictHasBadKeys(MathesarValidationException):
127 error_code = ErrorCodes.DictHasBadKeys.value
128
129 def __init__(
130 self,
131 message=None,
132 field=None,
133 ):
134 if message is None:
135 message = "Dictionary's keys are invalid or obligatory keys are missing."
136 super().__init__(None, self.error_code, message, field, None)
137
138
139 class InvalidTableName(MathesarValidationException):
140 error_code = ErrorCodes.InvalidTableName.value
141
142 def __init__(
143 self,
144 table_name,
145 message=None,
146 field=None,
147 ):
148 message = f'Table name "{table_name}" is invalid.'
149 super().__init__(None, self.error_code, message, field, None)
150
151
152 class IncorrectOldPassword(MathesarValidationException):
153 error_code = ErrorCodes.IncorrectOldPassword.value
154
155 def __init__(
156 self,
157
158 field=None,
159 ):
160 message = "Old password is not correct"
161 super().__init__(None, self.error_code, message, field, None)
162
[end of mathesar/api/exceptions/validation_exceptions/exceptions.py]
[start of mathesar/api/exceptions/error_codes.py]
1 from enum import Enum, unique
2
3
4 @unique
5 class ErrorCodes(Enum):
6 # Matches with default code of drf-friendly-errors library
7 # API Error
8 MethodNotAllowed = 4006
9 NotFound = 4005
10 TableNotFound = 4041
11 RecordNotFound = 4042
12 UnknownError = 4999
13 # Generic Errors
14 ProgrammingError = 4101
15 TypeError = 4102
16 ValueError = 4103
17
18 # DB Error Codes
19 DuplicateTableError = 4205
20 DuplicateColumnError = 4206
21 ExclusionViolation = 4213
22 ForeignKeyViolation = 4212
23 InvalidTypeCast = 4203
24 InvalidTypeOption = 4210
25 InvalidDefault = 4211
26 NonClassifiedIntegrityError = 4201
27 NotNullViolation = 4204
28 RaiseException = 4202
29 TypeMismatchViolation = 4214
30 UndefinedFunction = 4207
31 UniqueViolation = 4208
32 UnsupportedType = 4209
33
34 # Data Imports error code
35 InvalidTableError = 4301
36 UniqueImportViolation = 4303
37
38 # Validation Error
39 ColumnSizeMismatch = 4401
40 DistinctColumnNameRequired = 4402
41 MappingsNotFound = 4417
42 MultipleDataFiles = 4400
43 MoneyDisplayOptionConflict = 4407
44 UnsupportedAlter = 4403
45 URLDownloadError = 4404
46 URLNotReachableError = 4405
47 URLInvalidContentType = 4406
48 UnknownDBType = 4408
49 InvalidDateError = 4413
50 InvalidDateFormatError = 4414
51 InvalidLinkChoice = 4409
52 InvalidTableName = 4420
53 IncompatibleFractionDigitValues = 4410
54 UnsupportedConstraint = 4411
55 ConstraintColumnEmpty = 4412
56 InvalidValueType = 4415
57 DictHasBadKeys = 4416
58 DeletedColumnAccess = 4418
59 IncorrectOldPassword = 4419
60
[end of mathesar/api/exceptions/error_codes.py]
[start of mathesar/api/db/viewsets/schemas.py]
1 from django_filters import rest_framework as filters
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
6 from rest_framework.response import Response
7
8 from mathesar.api.db.permissions.schema import SchemaAccessPolicy
9 from mathesar.api.dj_filters import SchemaFilter
10 from mathesar.api.pagination import DefaultLimitOffsetPagination
11 from mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer
12 from mathesar.api.serializers.schemas import SchemaSerializer
13 from mathesar.models.base import Schema
14 from mathesar.utils.schemas import create_schema_and_object
15
16
17 class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
18 serializer_class = SchemaSerializer
19 pagination_class = DefaultLimitOffsetPagination
20 filter_backends = (filters.DjangoFilterBackend,)
21 filterset_class = SchemaFilter
22 access_policy = SchemaAccessPolicy
23
24 def get_queryset(self):
25 qs = Schema.objects.all().order_by('-created_at')
26 return self.access_policy.scope_viewset_queryset(self.request, qs)
27
28 def create(self, request):
29 serializer = SchemaSerializer(data=request.data, context={'request': request})
30 serializer.is_valid(raise_exception=True)
31 database_name = serializer.validated_data['database'].name
32 schema = create_schema_and_object(
33 serializer.validated_data['name'],
34 database_name,
35 comment=serializer.validated_data.get('description')
36 )
37 serializer = SchemaSerializer(schema)
38 return Response(serializer.data, status=status.HTTP_201_CREATED)
39
40 def partial_update(self, request, pk=None):
41 serializer = SchemaSerializer(
42 data=request.data, context={'request': request}, partial=True
43 )
44 serializer.is_valid(raise_exception=True)
45
46 schema = self.get_object()
47 schema.update_sa_schema(serializer.validated_data)
48
49 # Reload the schema to avoid cached properties
50 schema = self.get_object()
51 schema.clear_name_cache()
52 serializer = SchemaSerializer(schema, context={'request': request})
53 return Response(serializer.data)
54
55 def destroy(self, request, pk=None):
56 schema = self.get_object()
57 schema.delete_sa_schema()
58 return Response(status=status.HTTP_204_NO_CONTENT)
59
60 @action(methods=['get'], detail=True)
61 def dependents(self, request, pk=None):
62 serializer = DependentFilterSerializer(data=request.GET)
63 serializer.is_valid(raise_exception=True)
64 types_exclude = serializer.validated_data['exclude']
65
66 schema = self.get_object()
67 serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})
68 return Response(serializer.data)
69
[end of mathesar/api/db/viewsets/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py
--- a/mathesar/api/db/viewsets/schemas.py
+++ b/mathesar/api/db/viewsets/schemas.py
@@ -12,6 +12,7 @@
from mathesar.api.serializers.schemas import SchemaSerializer
from mathesar.models.base import Schema
from mathesar.utils.schemas import create_schema_and_object
+from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed
class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):
@@ -44,6 +45,11 @@
serializer.is_valid(raise_exception=True)
schema = self.get_object()
+
+ # We forbid editing the public schema
+ if schema.name == "public":
+ raise EditingPublicSchemaIsDisallowed()
+
schema.update_sa_schema(serializer.validated_data)
# Reload the schema to avoid cached properties
diff --git a/mathesar/api/exceptions/error_codes.py b/mathesar/api/exceptions/error_codes.py
--- a/mathesar/api/exceptions/error_codes.py
+++ b/mathesar/api/exceptions/error_codes.py
@@ -57,3 +57,4 @@
DictHasBadKeys = 4416
DeletedColumnAccess = 4418
IncorrectOldPassword = 4419
+ EditingPublicSchema = 4421
diff --git a/mathesar/api/exceptions/validation_exceptions/exceptions.py b/mathesar/api/exceptions/validation_exceptions/exceptions.py
--- a/mathesar/api/exceptions/validation_exceptions/exceptions.py
+++ b/mathesar/api/exceptions/validation_exceptions/exceptions.py
@@ -154,8 +154,18 @@
def __init__(
self,
-
field=None,
):
- message = "Old password is not correct"
+ message = "Old password is not correct."
super().__init__(None, self.error_code, message, field, None)
+
+
+class EditingPublicSchemaIsDisallowed(MathesarValidationException):
+ error_code = ErrorCodes.EditingPublicSchema.value
+
+ def __init__(
+ self,
+ message="Editing the public schema is disallowed.",
+ field=None,
+ ):
+ super().__init__(None, self.error_code, message, field)
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py\n--- a/mathesar/api/db/viewsets/schemas.py\n+++ b/mathesar/api/db/viewsets/schemas.py\n@@ -12,6 +12,7 @@\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.models.base import Schema\n from mathesar.utils.schemas import create_schema_and_object\n+from mathesar.api.exceptions.validation_exceptions.exceptions import EditingPublicSchemaIsDisallowed\n \n \n class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n@@ -44,6 +45,11 @@\n serializer.is_valid(raise_exception=True)\n \n schema = self.get_object()\n+\n+ # We forbid editing the public schema\n+ if schema.name == \"public\":\n+ raise EditingPublicSchemaIsDisallowed()\n+\n schema.update_sa_schema(serializer.validated_data)\n \n # Reload the schema to avoid cached properties\ndiff --git a/mathesar/api/exceptions/error_codes.py b/mathesar/api/exceptions/error_codes.py\n--- a/mathesar/api/exceptions/error_codes.py\n+++ b/mathesar/api/exceptions/error_codes.py\n@@ -57,3 +57,4 @@\n DictHasBadKeys = 4416\n DeletedColumnAccess = 4418\n IncorrectOldPassword = 4419\n+ EditingPublicSchema = 4421\ndiff --git a/mathesar/api/exceptions/validation_exceptions/exceptions.py b/mathesar/api/exceptions/validation_exceptions/exceptions.py\n--- a/mathesar/api/exceptions/validation_exceptions/exceptions.py\n+++ b/mathesar/api/exceptions/validation_exceptions/exceptions.py\n@@ -154,8 +154,18 @@\n \n def __init__(\n self,\n-\n field=None,\n ):\n- message = \"Old password is not correct\"\n+ message = \"Old password is not correct.\"\n super().__init__(None, self.error_code, message, field, None)\n+\n+\n+class EditingPublicSchemaIsDisallowed(MathesarValidationException):\n+ error_code = ErrorCodes.EditingPublicSchema.value\n+\n+ def __init__(\n+ self,\n+ message=\"Editing the public schema is disallowed.\",\n+ field=None,\n+ ):\n+ super().__init__(None, self.error_code, message, field)\n", "issue": "Don't allow public schema to be edited\nWe currently allow editing the public schema's name and description from the schema page. 
We should disable this.\r\n\r\n\r\n<img width=\"968\" alt=\"Screen Shot 2022-11-18 at 12 03 58 PM\" src=\"https://user-images.githubusercontent.com/287034/202761131-265da99a-6f76-4f02-b680-7a98851427b3.png\">\r\n\n", "before_files": [{"content": "from mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.validation_exceptions.base_exceptions import MathesarValidationException\n\n\nclass DistinctColumnRequiredAPIException(MathesarValidationException):\n error_code = ErrorCodes.DistinctColumnNameRequired.value\n\n def __init__(\n self,\n message=\"Column names must be distinct\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass ColumnSizeMismatchAPIException(MathesarValidationException):\n error_code = ErrorCodes.ColumnSizeMismatch.value\n\n def __init__(\n self,\n message=\"Incorrect number of columns in request.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass InvalidLinkChoiceAPIException(MathesarValidationException):\n error_code = ErrorCodes.InvalidLinkChoice.value\n\n def __init__(\n self,\n message=\"Invalid Link type\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass MultipleDataFileAPIException(MathesarValidationException):\n error_code = ErrorCodes.MultipleDataFiles.value\n\n def __init__(\n self,\n message=\"Multiple data files are unsupported.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnknownDatabaseTypeIdentifier(MathesarValidationException):\n error_code = ErrorCodes.UnknownDBType.value\n\n def __init__(\n self,\n db_type_id,\n field=None,\n details=None,\n ):\n message = f\"Unknown database type identifier {db_type_id}.\"\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass MoneyDisplayOptionValueConflictAPIException(MathesarValidationException):\n error_code = ErrorCodes.MoneyDisplayOptionConflict.value\n\n def __init__(\n self,\n message=\"Money type cannot specify a currency code display option as well as other display options.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass IncompatibleFractionDigitValuesAPIException(MathesarValidationException):\n error_code = ErrorCodes.IncompatibleFractionDigitValues.value\n\n def __init__(\n self,\n message=\"maximum_fraction_digits cannot be less than minimum_fraction_digits.\",\n field=None,\n details=None,\n ):\n super().__init__(None, self.error_code, message, field, details)\n\n\nclass UnsupportedConstraintAPIException(MathesarValidationException):\n error_code = ErrorCodes.UnsupportedConstraint.value\n\n def __init__(\n self,\n constraint_type,\n field=None,\n ):\n message = f\"Operations related to {constraint_type} constraint are currently not supported\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass ConstraintColumnEmptyAPIException(MathesarValidationException):\n error_code = ErrorCodes.ConstraintColumnEmpty.value\n\n def __init__(\n self,\n field=None,\n ):\n message = \"Constraint column field cannot be empty\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidValueType(MathesarValidationException):\n error_code = ErrorCodes.InvalidValueType.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Value's type is invalid.\"\n 
super().__init__(None, self.error_code, message, field, None)\n\n\nclass DictHasBadKeys(MathesarValidationException):\n error_code = ErrorCodes.DictHasBadKeys.value\n\n def __init__(\n self,\n message=None,\n field=None,\n ):\n if message is None:\n message = \"Dictionary's keys are invalid or obligatory keys are missing.\"\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass InvalidTableName(MathesarValidationException):\n error_code = ErrorCodes.InvalidTableName.value\n\n def __init__(\n self,\n table_name,\n message=None,\n field=None,\n ):\n message = f'Table name \"{table_name}\" is invalid.'\n super().__init__(None, self.error_code, message, field, None)\n\n\nclass IncorrectOldPassword(MathesarValidationException):\n error_code = ErrorCodes.IncorrectOldPassword.value\n\n def __init__(\n self,\n\n field=None,\n ):\n message = \"Old password is not correct\"\n super().__init__(None, self.error_code, message, field, None)\n", "path": "mathesar/api/exceptions/validation_exceptions/exceptions.py"}, {"content": "from enum import Enum, unique\n\n\n@unique\nclass ErrorCodes(Enum):\n # Matches with default code of drf-friendly-errors library\n # API Error\n MethodNotAllowed = 4006\n NotFound = 4005\n TableNotFound = 4041\n RecordNotFound = 4042\n UnknownError = 4999\n # Generic Errors\n ProgrammingError = 4101\n TypeError = 4102\n ValueError = 4103\n\n # DB Error Codes\n DuplicateTableError = 4205\n DuplicateColumnError = 4206\n ExclusionViolation = 4213\n ForeignKeyViolation = 4212\n InvalidTypeCast = 4203\n InvalidTypeOption = 4210\n InvalidDefault = 4211\n NonClassifiedIntegrityError = 4201\n NotNullViolation = 4204\n RaiseException = 4202\n TypeMismatchViolation = 4214\n UndefinedFunction = 4207\n UniqueViolation = 4208\n UnsupportedType = 4209\n\n # Data Imports error code\n InvalidTableError = 4301\n UniqueImportViolation = 4303\n\n # Validation Error\n ColumnSizeMismatch = 4401\n DistinctColumnNameRequired = 4402\n MappingsNotFound = 4417\n MultipleDataFiles = 4400\n MoneyDisplayOptionConflict = 4407\n UnsupportedAlter = 4403\n URLDownloadError = 4404\n URLNotReachableError = 4405\n URLInvalidContentType = 4406\n UnknownDBType = 4408\n InvalidDateError = 4413\n InvalidDateFormatError = 4414\n InvalidLinkChoice = 4409\n InvalidTableName = 4420\n IncompatibleFractionDigitValues = 4410\n UnsupportedConstraint = 4411\n ConstraintColumnEmpty = 4412\n InvalidValueType = 4415\n DictHasBadKeys = 4416\n DeletedColumnAccess = 4418\n IncorrectOldPassword = 4419\n", "path": "mathesar/api/exceptions/error_codes.py"}, {"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n 
filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}]} | 3,438 | 512 |
gh_patches_debug_14182 | rasdani/github-patches | git_diff | fonttools__fonttools-2376 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ttLib] font.importXML() always sets sfntVersion from imported TTX
When using `ttx -m` or `font.importXML()`, the font's `sfntVersion` is unconditionally taken from the imported TTX. That is correct if we're doing a full import (starting with an empty TTFont) but is dubious when importing some tables into an existing font. Importing (say) a name table from an .otf into an existing .ttf will set `sfntVersion` to "OTTO", which is incorrect for TTF.
Perhaps we should not set `sfntVersion` if `importXML()` is called on a non-empty font.
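A minimal sketch of the surprising behaviour described above (the file names are hypothetical, not taken from a real report):

```python
from fontTools.ttLib import TTFont

font = TTFont("Existing.ttf")        # an existing TrueType-flavored font
font.importXML("name-from-otf.ttx")  # TTX dump of just a name table, made from an .otf
print(font.sfntVersion)              # now "OTTO", which is wrong for a .ttf
```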
</issue>
<code>
[start of Lib/fontTools/misc/xmlReader.py]
1 from fontTools import ttLib
2 from fontTools.misc.textTools import safeEval
3 from fontTools.ttLib.tables.DefaultTable import DefaultTable
4 import sys
5 import os
6 import logging
7
8
9 log = logging.getLogger(__name__)
10
11 class TTXParseError(Exception): pass
12
13 BUFSIZE = 0x4000
14
15
16 class XMLReader(object):
17
18 def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
19 if fileOrPath == '-':
20 fileOrPath = sys.stdin
21 if not hasattr(fileOrPath, "read"):
22 self.file = open(fileOrPath, "rb")
23 self._closeStream = True
24 else:
25 # assume readable file object
26 self.file = fileOrPath
27 self._closeStream = False
28 self.ttFont = ttFont
29 self.progress = progress
30 if quiet is not None:
31 from fontTools.misc.loggingTools import deprecateArgument
32 deprecateArgument("quiet", "configure logging instead")
33 self.quiet = quiet
34 self.root = None
35 self.contentStack = []
36 self.contentOnly = contentOnly
37 self.stackSize = 0
38
39 def read(self, rootless=False):
40 if rootless:
41 self.stackSize += 1
42 if self.progress:
43 self.file.seek(0, 2)
44 fileSize = self.file.tell()
45 self.progress.set(0, fileSize // 100 or 1)
46 self.file.seek(0)
47 self._parseFile(self.file)
48 if self._closeStream:
49 self.close()
50 if rootless:
51 self.stackSize -= 1
52
53 def close(self):
54 self.file.close()
55
56 def _parseFile(self, file):
57 from xml.parsers.expat import ParserCreate
58 parser = ParserCreate()
59 parser.StartElementHandler = self._startElementHandler
60 parser.EndElementHandler = self._endElementHandler
61 parser.CharacterDataHandler = self._characterDataHandler
62
63 pos = 0
64 while True:
65 chunk = file.read(BUFSIZE)
66 if not chunk:
67 parser.Parse(chunk, 1)
68 break
69 pos = pos + len(chunk)
70 if self.progress:
71 self.progress.set(pos // 100)
72 parser.Parse(chunk, 0)
73
74 def _startElementHandler(self, name, attrs):
75 if self.stackSize == 1 and self.contentOnly:
76 # We already know the table we're parsing, skip
77 # parsing the table tag and continue to
78 # stack '2' which begins parsing content
79 self.contentStack.append([])
80 self.stackSize = 2
81 return
82 stackSize = self.stackSize
83 self.stackSize = stackSize + 1
84 subFile = attrs.get("src")
85 if subFile is not None:
86 if hasattr(self.file, 'name'):
87 # if file has a name, get its parent directory
88 dirname = os.path.dirname(self.file.name)
89 else:
90 # else fall back to using the current working directory
91 dirname = os.getcwd()
92 subFile = os.path.join(dirname, subFile)
93 if not stackSize:
94 if name != "ttFont":
95 raise TTXParseError("illegal root tag: %s" % name)
96 sfntVersion = attrs.get("sfntVersion")
97 if sfntVersion is not None:
98 if len(sfntVersion) != 4:
99 sfntVersion = safeEval('"' + sfntVersion + '"')
100 self.ttFont.sfntVersion = sfntVersion
101 self.contentStack.append([])
102 elif stackSize == 1:
103 if subFile is not None:
104 subReader = XMLReader(subFile, self.ttFont, self.progress)
105 subReader.read()
106 self.contentStack.append([])
107 return
108 tag = ttLib.xmlToTag(name)
109 msg = "Parsing '%s' table..." % tag
110 if self.progress:
111 self.progress.setLabel(msg)
112 log.info(msg)
113 if tag == "GlyphOrder":
114 tableClass = ttLib.GlyphOrder
115 elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
116 tableClass = DefaultTable
117 else:
118 tableClass = ttLib.getTableClass(tag)
119 if tableClass is None:
120 tableClass = DefaultTable
121 if tag == 'loca' and tag in self.ttFont:
122 # Special-case the 'loca' table as we need the
123 # original if the 'glyf' table isn't recompiled.
124 self.currentTable = self.ttFont[tag]
125 else:
126 self.currentTable = tableClass(tag)
127 self.ttFont[tag] = self.currentTable
128 self.contentStack.append([])
129 elif stackSize == 2 and subFile is not None:
130 subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
131 subReader.read()
132 self.contentStack.append([])
133 self.root = subReader.root
134 elif stackSize == 2:
135 self.contentStack.append([])
136 self.root = (name, attrs, self.contentStack[-1])
137 else:
138 l = []
139 self.contentStack[-1].append((name, attrs, l))
140 self.contentStack.append(l)
141
142 def _characterDataHandler(self, data):
143 if self.stackSize > 1:
144 self.contentStack[-1].append(data)
145
146 def _endElementHandler(self, name):
147 self.stackSize = self.stackSize - 1
148 del self.contentStack[-1]
149 if not self.contentOnly:
150 if self.stackSize == 1:
151 self.root = None
152 elif self.stackSize == 2:
153 name, attrs, content = self.root
154 self.currentTable.fromXML(name, attrs, content, self.ttFont)
155 self.root = None
156
157
158 class ProgressPrinter(object):
159
160 def __init__(self, title, maxval=100):
161 print(title)
162
163 def set(self, val, maxval=None):
164 pass
165
166 def increment(self, val=1):
167 pass
168
169 def setLabel(self, text):
170 print(text)
171
[end of Lib/fontTools/misc/xmlReader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py
--- a/Lib/fontTools/misc/xmlReader.py
+++ b/Lib/fontTools/misc/xmlReader.py
@@ -93,11 +93,12 @@
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
- sfntVersion = attrs.get("sfntVersion")
- if sfntVersion is not None:
- if len(sfntVersion) != 4:
- sfntVersion = safeEval('"' + sfntVersion + '"')
- self.ttFont.sfntVersion = sfntVersion
+ if self.ttFont.reader is None and not self.ttFont.tables:
+ sfntVersion = attrs.get("sfntVersion")
+ if sfntVersion is not None:
+ if len(sfntVersion) != 4:
+ sfntVersion = safeEval('"' + sfntVersion + '"')
+ self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
| {"golden_diff": "diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py\n--- a/Lib/fontTools/misc/xmlReader.py\n+++ b/Lib/fontTools/misc/xmlReader.py\n@@ -93,11 +93,12 @@\n \t\tif not stackSize:\n \t\t\tif name != \"ttFont\":\n \t\t\t\traise TTXParseError(\"illegal root tag: %s\" % name)\n-\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n-\t\t\tif sfntVersion is not None:\n-\t\t\t\tif len(sfntVersion) != 4:\n-\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n-\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n+\t\t\tif self.ttFont.reader is None and not self.ttFont.tables:\n+\t\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n+\t\t\t\tif sfntVersion is not None:\n+\t\t\t\t\tif len(sfntVersion) != 4:\n+\t\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n+\t\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n \t\t\tself.contentStack.append([])\n \t\telif stackSize == 1:\n \t\t\tif subFile is not None:\n", "issue": "[ttLib] font.importXML() always sets sfntVersion from imported TTX\nWhen using `ttx -m` or `font.importXML()`, the font's `sfntVersion` is unconditionally taken from the imported TTX. That is correct if we're doing a full import (starting with an empty TTFont) but is dubious when importing some tables into an existing font. Importing (say) a name table from an .otf into an existing .ttf will set `sfntValue` to \"OTTO\", which is incorrect for TTF.\r\n\r\nPerhaps we should not set `sfntVersion` if `importXML()` is called on a non-empty font.\n", "before_files": [{"content": "from fontTools import ttLib\nfrom fontTools.misc.textTools import safeEval\nfrom fontTools.ttLib.tables.DefaultTable import DefaultTable\nimport sys\nimport os\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\nclass TTXParseError(Exception): pass\n\nBUFSIZE = 0x4000\n\n\nclass XMLReader(object):\n\n\tdef __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):\n\t\tif fileOrPath == '-':\n\t\t\tfileOrPath = sys.stdin\n\t\tif not hasattr(fileOrPath, \"read\"):\n\t\t\tself.file = open(fileOrPath, \"rb\")\n\t\t\tself._closeStream = True\n\t\telse:\n\t\t\t# assume readable file object\n\t\t\tself.file = fileOrPath\n\t\t\tself._closeStream = False\n\t\tself.ttFont = ttFont\n\t\tself.progress = progress\n\t\tif quiet is not None:\n\t\t\tfrom fontTools.misc.loggingTools import deprecateArgument\n\t\t\tdeprecateArgument(\"quiet\", \"configure logging instead\")\n\t\t\tself.quiet = quiet\n\t\tself.root = None\n\t\tself.contentStack = []\n\t\tself.contentOnly = contentOnly\n\t\tself.stackSize = 0\n\n\tdef read(self, rootless=False):\n\t\tif rootless:\n\t\t\tself.stackSize += 1\n\t\tif self.progress:\n\t\t\tself.file.seek(0, 2)\n\t\t\tfileSize = self.file.tell()\n\t\t\tself.progress.set(0, fileSize // 100 or 1)\n\t\t\tself.file.seek(0)\n\t\tself._parseFile(self.file)\n\t\tif self._closeStream:\n\t\t\tself.close()\n\t\tif rootless:\n\t\t\tself.stackSize -= 1\n\n\tdef close(self):\n\t\tself.file.close()\n\n\tdef _parseFile(self, file):\n\t\tfrom xml.parsers.expat import ParserCreate\n\t\tparser = ParserCreate()\n\t\tparser.StartElementHandler = self._startElementHandler\n\t\tparser.EndElementHandler = self._endElementHandler\n\t\tparser.CharacterDataHandler = self._characterDataHandler\n\n\t\tpos = 0\n\t\twhile True:\n\t\t\tchunk = file.read(BUFSIZE)\n\t\t\tif not chunk:\n\t\t\t\tparser.Parse(chunk, 1)\n\t\t\t\tbreak\n\t\t\tpos = pos + len(chunk)\n\t\t\tif self.progress:\n\t\t\t\tself.progress.set(pos // 100)\n\t\t\tparser.Parse(chunk, 0)\n\n\tdef 
_startElementHandler(self, name, attrs):\n\t\tif self.stackSize == 1 and self.contentOnly:\n\t\t\t# We already know the table we're parsing, skip\n\t\t\t# parsing the table tag and continue to\n\t\t\t# stack '2' which begins parsing content\n\t\t\tself.contentStack.append([])\n\t\t\tself.stackSize = 2\n\t\t\treturn\n\t\tstackSize = self.stackSize\n\t\tself.stackSize = stackSize + 1\n\t\tsubFile = attrs.get(\"src\")\n\t\tif subFile is not None:\n\t\t\tif hasattr(self.file, 'name'):\n\t\t\t\t# if file has a name, get its parent directory\n\t\t\t\tdirname = os.path.dirname(self.file.name)\n\t\t\telse:\n\t\t\t\t# else fall back to using the current working directory\n\t\t\t\tdirname = os.getcwd()\n\t\t\tsubFile = os.path.join(dirname, subFile)\n\t\tif not stackSize:\n\t\t\tif name != \"ttFont\":\n\t\t\t\traise TTXParseError(\"illegal root tag: %s\" % name)\n\t\t\tsfntVersion = attrs.get(\"sfntVersion\")\n\t\t\tif sfntVersion is not None:\n\t\t\t\tif len(sfntVersion) != 4:\n\t\t\t\t\tsfntVersion = safeEval('\"' + sfntVersion + '\"')\n\t\t\t\tself.ttFont.sfntVersion = sfntVersion\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 1:\n\t\t\tif subFile is not None:\n\t\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress)\n\t\t\t\tsubReader.read()\n\t\t\t\tself.contentStack.append([])\n\t\t\t\treturn\n\t\t\ttag = ttLib.xmlToTag(name)\n\t\t\tmsg = \"Parsing '%s' table...\" % tag\n\t\t\tif self.progress:\n\t\t\t\tself.progress.setLabel(msg)\n\t\t\tlog.info(msg)\n\t\t\tif tag == \"GlyphOrder\":\n\t\t\t\ttableClass = ttLib.GlyphOrder\n\t\t\telif \"ERROR\" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):\n\t\t\t\ttableClass = DefaultTable\n\t\t\telse:\n\t\t\t\ttableClass = ttLib.getTableClass(tag)\n\t\t\t\tif tableClass is None:\n\t\t\t\t\ttableClass = DefaultTable\n\t\t\tif tag == 'loca' and tag in self.ttFont:\n\t\t\t\t# Special-case the 'loca' table as we need the\n\t\t\t\t# original if the 'glyf' table isn't recompiled.\n\t\t\t\tself.currentTable = self.ttFont[tag]\n\t\t\telse:\n\t\t\t\tself.currentTable = tableClass(tag)\n\t\t\t\tself.ttFont[tag] = self.currentTable\n\t\t\tself.contentStack.append([])\n\t\telif stackSize == 2 and subFile is not None:\n\t\t\tsubReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)\n\t\t\tsubReader.read()\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = subReader.root\n\t\telif stackSize == 2:\n\t\t\tself.contentStack.append([])\n\t\t\tself.root = (name, attrs, self.contentStack[-1])\n\t\telse:\n\t\t\tl = []\n\t\t\tself.contentStack[-1].append((name, attrs, l))\n\t\t\tself.contentStack.append(l)\n\n\tdef _characterDataHandler(self, data):\n\t\tif self.stackSize > 1:\n\t\t\tself.contentStack[-1].append(data)\n\n\tdef _endElementHandler(self, name):\n\t\tself.stackSize = self.stackSize - 1\n\t\tdel self.contentStack[-1]\n\t\tif not self.contentOnly:\n\t\t\tif self.stackSize == 1:\n\t\t\t\tself.root = None\n\t\t\telif self.stackSize == 2:\n\t\t\t\tname, attrs, content = self.root\n\t\t\t\tself.currentTable.fromXML(name, attrs, content, self.ttFont)\n\t\t\t\tself.root = None\n\n\nclass ProgressPrinter(object):\n\n\tdef __init__(self, title, maxval=100):\n\t\tprint(title)\n\n\tdef set(self, val, maxval=None):\n\t\tpass\n\n\tdef increment(self, val=1):\n\t\tpass\n\n\tdef setLabel(self, text):\n\t\tprint(text)\n", "path": "Lib/fontTools/misc/xmlReader.py"}]} | 2,419 | 257 |
gh_patches_debug_40167 | rasdani/github-patches | git_diff | mosaicml__composer-534 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect setting of persistent_workers hparam
DataloaderHparams:
`persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once", default=True)`
This makes it sound like the default option, which is True, shuts down the workers after the dataset has been consumed once. But when calling the torch DataLoader, the default is False, and setting this option to True actually keeps the workers alive.
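For contrast, a small illustration of the stock PyTorch behaviour (the dataset here is a throwaway placeholder):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(8).float())

# PyTorch's own default is persistent_workers=False: workers shut down after each epoch.
loader = DataLoader(dataset, num_workers=2)

# The hparam's default (True) maps to this call, which keeps the workers alive instead.
loader = DataLoader(dataset, num_workers=2, persistent_workers=True)
```

So the help string describes the opposite of what `default=True` actually does.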
</issue>
<code>
[start of composer/datasets/dataloader.py]
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from __future__ import annotations
4
5 import logging
6 import textwrap
7 from dataclasses import dataclass
8 from typing import Any, Callable, Iterator, Optional
9
10 import torch
11 import torch.distributed
12 import torch.utils.data
13 import yahp as hp
14
15 from composer.core.types import Batch, DataLoader, Dataset
16
17 log = logging.getLogger(__name__)
18
19
20 class WrappedDataLoader(DataLoader):
21
22 def __init__(self, dataloader: DataLoader) -> None:
23 if self.is_dataloader_already_wrapped(dataloader):
24 log.debug(
25 textwrap.dedent("""\
26 The dataloader is already wrapped with %s; it will be wrapped again.
27 If this is unintended behavior, guard the wrapping of the dataloader i.e. with:
28 if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)"""),
29 type(self).__name__,
30 type(self).__name__,
31 type(self).__name__,
32 )
33 self.dataset = dataloader.dataset
34 self.batch_size = dataloader.batch_size
35 self.num_workers = dataloader.num_workers
36 self.pin_memory = dataloader.pin_memory
37 self.drop_last = dataloader.drop_last
38 self.timeout = dataloader.timeout
39 self.sampler = dataloader.sampler
40 self.prefetch_factor = dataloader.prefetch_factor
41 self.dataloader = dataloader
42
43 def __len__(self) -> int:
44 return len(self.dataloader)
45
46 def __iter__(self) -> Iterator[Batch]:
47 return iter(self.dataloader)
48
49 def __bool__(self) -> bool:
50 return True
51
52 def __setattr__(self, name: str, value: Any) -> None:
53 if hasattr(self, name) and name in ("dataset", "batch_size", "num_workers", "pin_memory", "drop_last",
54 "timeout", "sampler", "prefetch_factor", "dataloader"):
55 raise RuntimeError(f"Property {name} cannot be set after initialization in a DataLoader")
56 return super().__setattr__(name, value)
57
58 @classmethod
59 def is_dataloader_already_wrapped(cls, dataloader: DataLoader):
60 """Returns whether the ``dataloader`` is wrapped with ``cls``. This helper method checks recursively through all
61 wrappings until the underlying dataloader is reached.
62
63 Args:
64 dataloader (DataLoader): The dataloader to check
65
66 Returns:
67 bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.
68 """
69 if isinstance(dataloader, cls):
70 return True
71 if not isinstance(dataloader, WrappedDataLoader):
72 return False
73 if not isinstance(dataloader.dataloader, WrappedDataLoader):
74 return False
75 return cls.is_dataloader_already_wrapped(dataloader.dataloader)
76
77
78 def unwrap_data_loader(dataloader: DataLoader) -> DataLoader:
79 """Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.
80
81 Args:
82 dataloader (DataLoader): The dataloader to unwrap
83
84 Returns:
85 DataLoader: The underlying dataloader
86 """
87 if isinstance(dataloader, WrappedDataLoader):
88 return unwrap_data_loader(dataloader.dataloader)
89 return dataloader
90
91
92 @dataclass
93 class DataloaderHparams(hp.Hparams):
94 """Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.
95
96 Parameters:
97 num_workers (int): Number of CPU workers to use per device to fetch data.
98 prefetch_factor (int): Number of samples loaded in advance by each worker.
99 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
100 persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.
101 pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.
102 timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.
103 """
104
105 num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8)
106 prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2)
107 persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once",
108 default=True)
109 pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them",
110 default=True)
111 timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout",
112 default=0)
113
114 def initialize_object(
115 self,
116 dataset: Dataset,
117 *,
118 batch_size: int,
119 sampler: Optional[torch.utils.data.Sampler[int]],
120 drop_last: bool,
121 collate_fn: Optional[Callable] = None,
122 worker_init_fn: Optional[Callable] = None,
123 ) -> DataLoader:
124 """Create a dataloader.
125
126 Args:
127 dataset (Dataset): The dataset.
128 batch_size (int): The per-device batch size.
129 sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.
130 drop_last (bool): Whether to drop the last batch if the number of
131 samples is not evenly divisible by the batch size.
132 collate_fn (callable, optional): Custom collate function. Defaults to None.
133 worker_init_fn (callable, optional): Custom worker init function. Defaults to None.
134
135 Returns:
136 DataLoader: The dataloader.
137 """
138
139 return torch.utils.data.DataLoader(dataset,
140 batch_size=batch_size,
141 num_workers=self.num_workers,
142 pin_memory=self.pin_memory,
143 drop_last=drop_last,
144 sampler=sampler,
145 collate_fn=collate_fn,
146 worker_init_fn=worker_init_fn,
147 timeout=self.timeout,
148 prefetch_factor=self.prefetch_factor,
149 persistent_workers=self.persistent_workers)
150
[end of composer/datasets/dataloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py
--- a/composer/datasets/dataloader.py
+++ b/composer/datasets/dataloader.py
@@ -94,22 +94,42 @@
"""Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.
Parameters:
- num_workers (int): Number of CPU workers to use per device to fetch data.
- prefetch_factor (int): Number of samples loaded in advance by each worker.
- 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
- persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.
- pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.
- timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.
+ num_workers (int, optional): Number of CPU workers to use per device to fetch data.
+ Set to ``0`` to use the main training thread for dataloading.
+ While zero workers can be useful for debugging, it should not be used for performance reasons.
+ (default: ``8``)
+ prefetch_factor (int, optional): Number of samples loaded in advance by each worker.
+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.
+ (default: ``2``)
+ persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,
+ then this field must be ``False``. (default: ``True``)
+ pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.
+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)
+ timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.
+ (default: ``0``)
"""
- num_workers: int = hp.optional("Number of CPU workers to use per device to fetch data.", default=8)
- prefetch_factor: int = hp.optional("Number of samples loaded in advance by each worker", default=2)
- persistent_workers: bool = hp.optional("Whether to shutdown workers after the dataset has been consumed once",
+ num_workers: int = hp.optional(textwrap.dedent("""\
+ Number of CPU workers to use per device to fetch data.
+ Set to ``0`` to use the main training thread for dataloading.
+ While zero workers can be useful for debugging, it should not be used for performance reasons."""),
+ default=8)
+ prefetch_factor: int = hp.optional(textwrap.dedent("""\
+ Number of samples loaded in advance by each worker.
+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.
+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value."""),
+ default=2)
+ persistent_workers: bool = hp.optional(textwrap.dedent("""\
+ Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,
+ then this field must be ``False``"""),
default=True)
- pin_memory: bool = hp.optional("Whether to copy Tensors into CUDA pinned memory before returning them",
+ pin_memory: bool = hp.optional(textwrap.dedent("""\
+ Whether or not to copy Tensors into CUDA pinned memory before returning them.
+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``."""),
default=True)
- timeout: float = hp.optional("Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout",
- default=0)
+ timeout: float = hp.optional(
+ "Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.", default=0)
def initialize_object(
self,
| {"golden_diff": "diff --git a/composer/datasets/dataloader.py b/composer/datasets/dataloader.py\n--- a/composer/datasets/dataloader.py\n+++ b/composer/datasets/dataloader.py\n@@ -94,22 +94,42 @@\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n \n Parameters:\n- num_workers (int): Number of CPU workers to use per device to fetch data.\n- prefetch_factor (int): Number of samples loaded in advance by each worker.\n- 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n- persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n- pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n- timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n+ num_workers (int, optional): Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\n+ (default: ``8``)\n+ prefetch_factor (int, optional): Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\n+ (default: ``2``)\n+ persistent_workers (bool): Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``. (default: ``True``)\n+ pin_memory (bool, optional): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``. (default: ``True``)\n+ timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\n+ (default: ``0``)\n \"\"\"\n \n- num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n- prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n- persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n+ num_workers: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of CPU workers to use per device to fetch data.\n+ Set to ``0`` to use the main training thread for dataloading.\n+ While zero workers can be useful for debugging, it should not be used for performance reasons.\"\"\"),\n+ default=8)\n+ prefetch_factor: int = hp.optional(textwrap.dedent(\"\"\"\\\n+ Number of samples loaded in advance by each worker.\n+ For example, 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n+ If ``num_workers = 0``, then the ``prefetch_factor`` must be left at the default value.\"\"\"),\n+ default=2)\n+ persistent_workers: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether to reuse dataloader workers across epochs. If ``num_workers`` is 0,\n+ then this field must be ``False``\"\"\"),\n default=True)\n- pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n+ pin_memory: bool = hp.optional(textwrap.dedent(\"\"\"\\\n+ Whether or not to copy Tensors into CUDA pinned memory before returning them.\n+ If ``num_workers = 0``, then the ``pin_memory`` must be ``False``.\"\"\"),\n default=True)\n- timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. 
Set to 0 for no timeout\",\n- default=0)\n+ timeout: float = hp.optional(\n+ \"Timeout, in seconds, for collecting a batch from workers. Set to ``0`` for no timeout.\", default=0)\n \n def initialize_object(\n self,\n", "issue": "Incorrect setting of persistent_workers hparam\nDataloaderHparams:\r\n`persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\", default=True)`\r\n\r\nThis makes it sound like the default option which is True shuts down the workers after the dataset has been consumed once. But when calling torch Dataloader, the default is False and this option keeps the workers alive.\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport logging\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Any, Callable, Iterator, Optional\n\nimport torch\nimport torch.distributed\nimport torch.utils.data\nimport yahp as hp\n\nfrom composer.core.types import Batch, DataLoader, Dataset\n\nlog = logging.getLogger(__name__)\n\n\nclass WrappedDataLoader(DataLoader):\n\n def __init__(self, dataloader: DataLoader) -> None:\n if self.is_dataloader_already_wrapped(dataloader):\n log.debug(\n textwrap.dedent(\"\"\"\\\n The dataloader is already wrapped with %s; it will be wrapped again.\n If this is unintended behavior, guard the wrapping of the dataloader i.e. with:\n if not %s.is_dataloader_already_wrapped(dataloader): dataloader = %s(dataloader)\"\"\"),\n type(self).__name__,\n type(self).__name__,\n type(self).__name__,\n )\n self.dataset = dataloader.dataset\n self.batch_size = dataloader.batch_size\n self.num_workers = dataloader.num_workers\n self.pin_memory = dataloader.pin_memory\n self.drop_last = dataloader.drop_last\n self.timeout = dataloader.timeout\n self.sampler = dataloader.sampler\n self.prefetch_factor = dataloader.prefetch_factor\n self.dataloader = dataloader\n\n def __len__(self) -> int:\n return len(self.dataloader)\n\n def __iter__(self) -> Iterator[Batch]:\n return iter(self.dataloader)\n\n def __bool__(self) -> bool:\n return True\n\n def __setattr__(self, name: str, value: Any) -> None:\n if hasattr(self, name) and name in (\"dataset\", \"batch_size\", \"num_workers\", \"pin_memory\", \"drop_last\",\n \"timeout\", \"sampler\", \"prefetch_factor\", \"dataloader\"):\n raise RuntimeError(f\"Property {name} cannot be set after initialization in a DataLoader\")\n return super().__setattr__(name, value)\n\n @classmethod\n def is_dataloader_already_wrapped(cls, dataloader: DataLoader):\n \"\"\"Returns whether the ``dataloader`` is wrapped with ``cls``. 
This helper method checks recursively through all\n wrappings until the underlying dataloader is reached.\n\n Args:\n dataloader (DataLoader): The dataloader to check\n\n Returns:\n bool: Whether the ``dataloader`` is wrapped recursively with ``cls``.\n \"\"\"\n if isinstance(dataloader, cls):\n return True\n if not isinstance(dataloader, WrappedDataLoader):\n return False\n if not isinstance(dataloader.dataloader, WrappedDataLoader):\n return False\n return cls.is_dataloader_already_wrapped(dataloader.dataloader)\n\n\ndef unwrap_data_loader(dataloader: DataLoader) -> DataLoader:\n \"\"\"Recursively unwraps a dataloader if it is of type :class:`WrappedDataLoader`.\n\n Args:\n dataloader (DataLoader): The dataloader to unwrap\n\n Returns:\n DataLoader: The underlying dataloader\n \"\"\"\n if isinstance(dataloader, WrappedDataLoader):\n return unwrap_data_loader(dataloader.dataloader)\n return dataloader\n\n\n@dataclass\nclass DataloaderHparams(hp.Hparams):\n \"\"\"Hyperparameters to initialize a :class:`~torch.utils.data.Dataloader`.\n\n Parameters:\n num_workers (int): Number of CPU workers to use per device to fetch data.\n prefetch_factor (int): Number of samples loaded in advance by each worker.\n 2 means there will be a total of 2 * num_workers samples prefetched across all workers.\n persistent_workers (bool): Whether or not to shutdown workers after the dataset has been consumed once.\n pin_memory (bool): Whether or not to copy Tensors into CUDA pinned memory before returning them.\n timeout (float): Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout.\n \"\"\"\n\n num_workers: int = hp.optional(\"Number of CPU workers to use per device to fetch data.\", default=8)\n prefetch_factor: int = hp.optional(\"Number of samples loaded in advance by each worker\", default=2)\n persistent_workers: bool = hp.optional(\"Whether to shutdown workers after the dataset has been consumed once\",\n default=True)\n pin_memory: bool = hp.optional(\"Whether to copy Tensors into CUDA pinned memory before returning them\",\n default=True)\n timeout: float = hp.optional(\"Timeout, in seconds, for collecting a batch from workers. Set to 0 for no timeout\",\n default=0)\n\n def initialize_object(\n self,\n dataset: Dataset,\n *,\n batch_size: int,\n sampler: Optional[torch.utils.data.Sampler[int]],\n drop_last: bool,\n collate_fn: Optional[Callable] = None,\n worker_init_fn: Optional[Callable] = None,\n ) -> DataLoader:\n \"\"\"Create a dataloader.\n\n Args:\n dataset (Dataset): The dataset.\n batch_size (int): The per-device batch size.\n sampler (torch.utils.data.Sampler[int] or None): The sampler to use for the dataloader.\n drop_last (bool): Whether to drop the last batch if the number of\n samples is not evenly divisible by the batch size.\n collate_fn (callable, optional): Custom collate function. Defaults to None.\n worker_init_fn (callable, optional): Custom worker init function. Defaults to None.\n\n Returns:\n DataLoader: The dataloader.\n \"\"\"\n\n return torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=drop_last,\n sampler=sampler,\n collate_fn=collate_fn,\n worker_init_fn=worker_init_fn,\n timeout=self.timeout,\n prefetch_factor=self.prefetch_factor,\n persistent_workers=self.persistent_workers)\n", "path": "composer/datasets/dataloader.py"}]} | 2,252 | 934 |
gh_patches_debug_37164 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-5695 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Loitering does not seem to work properly
<!--
STOP ! ! !
Read the following before creating anything (or you will have your issue/feature request closed without notice)
1. Please only create an ISSUE or a FEATURE REQUEST - don't mix the two together in one item
2. For a Feature Request please only fill out the FEATURE REQUEST section
3. For a Issue please only fill out the ISSUE section
4. Issues are NOT to be used for help/config problems/support - use the relevant slack channels as per the README
5. Provide a good summary in the title, don't just write problem, or awesome idea!
6. Delete all irrelevant sections not related to your issue/feature request (including this top section)
===============ISSUE SECTION===============
Before you create an Issue, please check the following:
1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check.
2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue.
3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`.
4. All Issue sections MUST be completed to help us determine the actual problem and find its cause
-->
### Expected Behavior
<!-- Tell us what you expect to happen -->
Run the bot, walk to the next point as given in my path file, then, while loitering, move to nearby forts, spin the Pokestops and catch Pokemon.
### Actual Behavior
<!-- Tell us what is happening -->
The bot only spins Pokestops while it is following the path; while it is loitering it does nothing.
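A plausible root cause (consistent with the fix shown in the diff further down this entry): while loitering, `FollowPath.work()` keeps returning `WorkerResult.RUNNING`, which prevents the later pokestop/catch workers from ever running:

```python
# pokemongo_bot/cell_workers/follow_path.py (master at the commit above)
if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:
    return WorkerResult.RUNNING  # later tasks are skipped while this is returned
```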
### Your FULL config.json (remove your username, password, gmapkey and any other private info)
<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->
http://pastebin.com/HDWecKDg
### Output when issue occurred
<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->
http://pastebin.com/2fY2XXBC
At lines 40 and 426 of the log the bot starts to loiter.
### Steps to Reproduce
<!-- Tell us the steps you have taken to reproduce the issue -->
./run.sh
### Other Information
OS: Ubuntu 16.04
<!-- Tell us what Operating system you're using -->
Branch: master
<!-- dev or master -->
Git Commit: a8ee31256d412413b107cce81b62059634e8c802
<!-- run 'git log -n 1 --pretty=format:"%H"' -->
Python Version: Python 2.7.12
<!-- run 'python -V' and paste it here) -->
Any other relevant files/configs (eg: path files)
<!-- Anything else which may be of relevance -->
path file
[
{"location": "40.7814675, -73.9741015, 33", "loiter": 300},
{"location": "40.7795502, -73.9632225, 46", "loiter": 300},
{"location": "40.7739931, -73.9665484, 23", "loiter": 300},
{"location": "40.7741881, -73.9707756, 25", "loiter": 300},
{"location": "40.7757805, -73.9717841, 26", "loiter": 300},
{"location": "40.7772429, -73.9712047, 29", "loiter": 300},
{"location": "40.7678016, -73.9717411, 18", "loiter": 300},
{"location": "40.7663878, -73.9732003, 21", "loiter": 300},
{"location": "40.7647626, -73.9732003, 32", "loiter": 300},
{"location": "40.7653802, -73.9750671, 26", "loiter": 300},
{"location": "40.7665015, -73.9748954, 14", "loiter": 300},
{"location": "40.7762842, -73.9740157, 32", "loiter": 300}
]
<!-- ===============END OF ISSUE SECTION=============== -->
<!-- Note: Delete these lines and everything BELOW if creating an Issue -->
</issue>
<code>
[start of pokemongo_bot/cell_workers/follow_path.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import absolute_import
4 import gpxpy
5 import gpxpy.gpx
6 import json
7 import time
8 from pokemongo_bot.base_task import BaseTask
9 from pokemongo_bot.cell_workers.utils import distance
10 from pokemongo_bot.cell_workers.utils import format_dist
11 from pokemongo_bot.cell_workers.utils import i2f
12 from pokemongo_bot.human_behaviour import sleep
13 from pokemongo_bot.walkers.walker_factory import walker_factory
14 from pokemongo_bot.worker_result import WorkerResult
15 from pgoapi.utilities import f2i
16 from random import uniform
17 from .utils import getSeconds, format_dist
18 from datetime import datetime as dt, timedelta
19
20 STATUS_MOVING = 0
21 STATUS_LOITERING = 1
22 STATUS_FINISHED = 2
23
24 class FollowPath(BaseTask):
25 SUPPORTED_TASK_API_VERSION = 1
26
27 def initialize(self):
28 self._process_config()
29 self.points = self.load_path()
30 self.status = STATUS_MOVING
31 self.loiter_end_time = 0
32 self.distance_unit = self.bot.config.distance_unit
33 self.append_unit = False
34
35 if self.path_start_mode == 'closest':
36 self.ptr = self.find_closest_point_idx(self.points)
37
38 else:
39 self.ptr = 0
40
41 def _process_config(self):
42 self.path_file = self.config.get("path_file", None)
43 self.path_mode = self.config.get("path_mode", "linear")
44 self.path_start_mode = self.config.get("path_start_mode", "first")
45 self.number_lap_max = self.config.get("number_lap", -1) # if < 0, then the number is inf.
46 self.timer_restart_min = getSeconds(self.config.get("timer_restart_min", "00:20:00"))
47 self.timer_restart_max = getSeconds(self.config.get("timer_restart_max", "02:00:00"))
48 self.walker = self.config.get('walker', 'StepWalker')
49
50 if self.timer_restart_min > self.timer_restart_max:
51 raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it...
52
53 #var not related to configs
54 self.number_lap = 0
55
56 def load_path(self):
57 if self.path_file is None:
58 raise RuntimeError('You need to specify a path file (json or gpx)')
59
60 if self.path_file.endswith('.json'):
61 return self.load_json()
62 elif self.path_file.endswith('.gpx'):
63 return self.load_gpx()
64
65 def load_json(self):
66 with open(self.path_file) as data_file:
67 points=json.load(data_file)
68 # Replace Verbal Location with lat&lng.
69 for _, point in enumerate(points):
70 point_tuple = self.bot.get_pos_by_name(point['location'])
71 self.emit_event(
72 'location_found',
73 level='debug',
74 formatted="Location found: {location} {position}",
75 data={
76 'location': point,
77 'position': point_tuple
78 }
79 )
80 # Keep point['location']
81 point["lat"] = float(point_tuple[0])
82 point["lng"] = float(point_tuple[1])
83 point["alt"] = float(point_tuple[2])
84 return points
85
86 def load_gpx(self):
87 gpx_file = open(self.path_file, 'r')
88 gpx = gpxpy.parse(gpx_file)
89
90 if len(gpx.tracks) == 0:
91 raise RuntimeError('GPX file does not contain a track')
92
93 points = []
94 track = gpx.tracks[0]
95 for segment in track.segments:
96 for point in segment.points:
97 points.append({"lat": point.latitude, "lng": point.longitude,
98 "alt": point.elevation, "location": point.name})
99
100 return points
101
102 def find_closest_point_idx(self, points):
103 return_idx = 0
104 min_distance = float("inf");
105
106 for index in range(len(points)):
107 point = points[index]
108 lat = point['lat']
109 lng = point['lng']
110
111 dist = distance(
112 self.bot.position[0],
113 self.bot.position[1],
114 lat,
115 lng
116 )
117
118 if dist < min_distance:
119 min_distance = dist
120 return_idx = index
121
122 return return_idx
123
124 def endLaps(self):
125 duration = int(uniform(self.timer_restart_min, self.timer_restart_max))
126 resume = dt.now() + timedelta(seconds=duration)
127
128 self.emit_event(
129 'path_lap_end',
130 formatted="Great job, lot of calories burned! Taking a break now for {duration}, will resume at {resume}.",
131 data={
132 'duration': str(timedelta(seconds=duration)),
133 'resume': resume.strftime("%H:%M:%S")
134 }
135 )
136
137 self.number_lap = 0 # at the end of the break, start again
138 sleep(duration)
139 self.bot.login()
140
141 def work(self):
142 # If done or loitering allow the next task to run
143 if self.status == STATUS_FINISHED:
144 return WorkerResult.SUCCESS
145
146 if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:
147 return WorkerResult.RUNNING
148
149 last_lat, last_lng, last_alt = self.bot.position
150
151 point = self.points[self.ptr]
152 lat = point['lat']
153 lng = point['lng']
154
155 if 'alt' in point:
156 alt = float(point['alt'])
157 else:
158 alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
159
160 if self.bot.config.walk_max > 0:
161 step_walker = walker_factory(self.walker,
162 self.bot,
163 lat,
164 lng,
165 alt
166 )
167
168 is_at_destination = False
169 if step_walker.step():
170 is_at_destination = True
171
172 else:
173 self.bot.api.set_position(lat, lng, alt)
174
175 dist = distance(
176 last_lat,
177 last_lng,
178 lat,
179 lng
180 )
181
182 self.emit_event(
183 'position_update',
184 formatted="Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..",
185 data={
186 'last_position': (last_lat, last_lng, last_alt),
187 'current_position': (lat, lng, alt),
188 'distance': format_dist(dist,self.distance_unit,self.append_unit),
189 'distance_unit': self.distance_unit
190 }
191 )
192
193 if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):
194 if "loiter" in point and self.status != STATUS_LOITERING:
195 self.logger.info("Loitering for {} seconds...".format(point["loiter"]))
196 self.status = STATUS_LOITERING
197 self.loiter_end_time = time.time() + point["loiter"]
198 return WorkerResult.RUNNING
199 if (self.ptr + 1) == len(self.points):
200 if self.path_mode == 'single':
201 self.status = STATUS_FINISHED
202 return WorkerResult.SUCCESS
203 self.ptr = 0
204 if self.path_mode == 'linear':
205 self.points = list(reversed(self.points))
206 if self.number_lap_max >= 0:
207 self.number_lap+=1
208 self.emit_event(
209 'path_lap_update',
210 formatted="number lap : {number_lap} / {number_lap_max}",
211 data={
212 'number_lap': str(self.number_lap),
213 'number_lap_max': str(self.number_lap_max)
214 }
215 )
216 if self.number_lap >= self.number_lap_max:
217 self.endLaps()
218 else:
219 self.ptr += 1
220
221 self.status = STATUS_MOVING
222 return WorkerResult.RUNNING
223
[end of pokemongo_bot/cell_workers/follow_path.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/follow_path.py b/pokemongo_bot/cell_workers/follow_path.py
--- a/pokemongo_bot/cell_workers/follow_path.py
+++ b/pokemongo_bot/cell_workers/follow_path.py
@@ -18,7 +18,7 @@
from datetime import datetime as dt, timedelta
STATUS_MOVING = 0
-STATUS_LOITERING = 1
+STATUS_WANDERING = 1
STATUS_FINISHED = 2
class FollowPath(BaseTask):
@@ -28,7 +28,7 @@
self._process_config()
self.points = self.load_path()
self.status = STATUS_MOVING
- self.loiter_end_time = 0
+ self.wander_end_time = 0
self.distance_unit = self.bot.config.distance_unit
self.append_unit = False
@@ -139,12 +139,12 @@
self.bot.login()
def work(self):
- # If done or loitering allow the next task to run
+ # If done or wandering allow the next task to run
if self.status == STATUS_FINISHED:
return WorkerResult.SUCCESS
- if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:
- return WorkerResult.RUNNING
+ if self.status == STATUS_WANDERING and time.time() < self.wander_end_time:
+ return WorkerResult.SUCCESS
last_lat, last_lng, last_alt = self.bot.position
@@ -190,12 +190,14 @@
}
)
- if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):
- if "loiter" in point and self.status != STATUS_LOITERING:
- self.logger.info("Loitering for {} seconds...".format(point["loiter"]))
- self.status = STATUS_LOITERING
- self.loiter_end_time = time.time() + point["loiter"]
- return WorkerResult.RUNNING
+ if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_WANDERING and time.time() >= self.wander_end_time):
+ if "loiter" in point:
+ self.logger.warning("'loiter' is obsolete, please change to 'wander' in {}".format(self.path_file))
+ if "wander" in point and self.status != STATUS_WANDERING:
+ self.logger.info("Wandering for {} seconds...".format(point["wander"]))
+ self.status = STATUS_WANDERING
+ self.wander_end_time = time.time() + point["wander"]
+ return WorkerResult.SUCCESS
if (self.ptr + 1) == len(self.points):
if self.path_mode == 'single':
self.status = STATUS_FINISHED
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/follow_path.py b/pokemongo_bot/cell_workers/follow_path.py\n--- a/pokemongo_bot/cell_workers/follow_path.py\n+++ b/pokemongo_bot/cell_workers/follow_path.py\n@@ -18,7 +18,7 @@\n from datetime import datetime as dt, timedelta\n \n STATUS_MOVING = 0\n-STATUS_LOITERING = 1\n+STATUS_WANDERING = 1\n STATUS_FINISHED = 2\n \n class FollowPath(BaseTask):\n@@ -28,7 +28,7 @@\n self._process_config()\n self.points = self.load_path()\n self.status = STATUS_MOVING\n- self.loiter_end_time = 0\n+ self.wander_end_time = 0\n self.distance_unit = self.bot.config.distance_unit\n self.append_unit = False\n \n@@ -139,12 +139,12 @@\n self.bot.login()\n \n def work(self):\n- # If done or loitering allow the next task to run\n+ # If done or wandering allow the next task to run\n if self.status == STATUS_FINISHED:\n return WorkerResult.SUCCESS\n \n- if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:\n- return WorkerResult.RUNNING\n+ if self.status == STATUS_WANDERING and time.time() < self.wander_end_time:\n+ return WorkerResult.SUCCESS\n \n last_lat, last_lng, last_alt = self.bot.position\n \n@@ -190,12 +190,14 @@\n }\n )\n \n- if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):\n- if \"loiter\" in point and self.status != STATUS_LOITERING:\n- self.logger.info(\"Loitering for {} seconds...\".format(point[\"loiter\"]))\n- self.status = STATUS_LOITERING\n- self.loiter_end_time = time.time() + point[\"loiter\"]\n- return WorkerResult.RUNNING\n+ if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_WANDERING and time.time() >= self.wander_end_time):\n+ if \"loiter\" in point:\n+ self.logger.warning(\"'loiter' is obsolete, please change to 'wander' in {}\".format(self.path_file))\n+ if \"wander\" in point and self.status != STATUS_WANDERING:\n+ self.logger.info(\"Wandering for {} seconds...\".format(point[\"wander\"]))\n+ self.status = STATUS_WANDERING\n+ self.wander_end_time = time.time() + point[\"wander\"]\n+ return WorkerResult.SUCCESS\n if (self.ptr + 1) == len(self.points):\n if self.path_mode == 'single':\n self.status = STATUS_FINISHED\n", "issue": "Loitering does not seem to work properly\n<!--\n\nSTOP ! ! !\n\nRead the following before creating anything (or you will have your issue/feature request closed without notice)\n\n1. Please only create an ISSUE or a FEATURE REQUEST - don't mix the two together in one item\n2. For a Feature Request please only fill out the FEATURE REQUEST section\n3. For a Issue please only fill out the ISSUE section\n4. Issues are NOT to be used for help/config problems/support - use the relevant slack channels as per the README\n5. Provide a good summary in the title, don't just write problem, or awesome idea!\n6. Delete all irrelevant sections not related to your issue/feature request (including this top section)\n\n===============ISSUE SECTION===============\n\nBefore you create an Issue, please check the following:\n\n1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check.\n2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue.\n3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. 
Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`.\n4. All Issue sections MUST be completed to help us determine the actual problem and find its cause\n-->\n### Expected Behavior\n\n<!-- Tell us what you expect to happen -->\n\nRun the bot, go to the next point as in my path file, then, while loitering, move to forts, spin the pokestop and catch pokemons.\n### Actual Behavior\n\n<!-- Tell us what is happening -->\n\nonly while is following a path spin the pokestops, while is loitering it does nothing.\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\n<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->\n\nhttp://pastebin.com/HDWecKDg\n### Output when issue occurred\n\n<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->\n\nhttp://pastebin.com/2fY2XXBC\nin the line 40 and 426 start to loiter.\n### Steps to Reproduce\n\n<!-- Tell us the steps you have taken to reproduce the issue -->\n\n./run.sh\n### Other Information\n\nOS: Ubuntu 16.04\n\n<!-- Tell us what Operating system you're using --> \n\nBranch: master\n\n<!-- dev or master --> \n\nGit Commit: a8ee31256d412413b107cce81b62059634e8c802\n\n<!-- run 'git log -n 1 --pretty=format:\"%H\"' --> \n\nPython Version: Python 2.7.12\n\n<!-- run 'python -V' and paste it here) --> \n\nAny other relevant files/configs (eg: path files) \n\n<!-- Anything else which may be of relevance -->\n\npath file\n[\n {\"location\": \"40.7814675, -73.9741015, 33\", \"loiter\": 300},\n {\"location\": \"40.7795502, -73.9632225, 46\", \"loiter\": 300},\n {\"location\": \"40.7739931, -73.9665484, 23\", \"loiter\": 300},\n {\"location\": \"40.7741881, -73.9707756, 25\", \"loiter\": 300},\n {\"location\": \"40.7757805, -73.9717841, 26\", \"loiter\": 300},\n {\"location\": \"40.7772429, -73.9712047, 29\", \"loiter\": 300},\n {\"location\": \"40.7678016, -73.9717411, 18\", \"loiter\": 300},\n {\"location\": \"40.7663878, -73.9732003, 21\", \"loiter\": 300},\n {\"location\": \"40.7647626, -73.9732003, 32\", \"loiter\": 300},\n {\"location\": \"40.7653802, -73.9750671, 26\", \"loiter\": 300},\n {\"location\": \"40.7665015, -73.9748954, 14\", \"loiter\": 300},\n {\"location\": \"40.7762842, -73.9740157, 32\", \"loiter\": 300}\n]\n\n<!-- ===============END OF ISSUE SECTION=============== -->\n\n<!-- Note: Delete these lines and everything BELOW if creating an Issue -->\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nimport gpxpy\nimport gpxpy.gpx\nimport json\nimport time\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot.cell_workers.utils import distance\nfrom pokemongo_bot.cell_workers.utils import format_dist\nfrom pokemongo_bot.cell_workers.utils import i2f\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.walkers.walker_factory import walker_factory\nfrom pokemongo_bot.worker_result import WorkerResult\nfrom pgoapi.utilities import f2i\nfrom random import uniform\nfrom .utils import getSeconds, format_dist\nfrom datetime import datetime as dt, timedelta\n\nSTATUS_MOVING = 0\nSTATUS_LOITERING = 1\nSTATUS_FINISHED = 2\n \nclass FollowPath(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n def initialize(self):\n self._process_config()\n self.points = self.load_path()\n self.status = STATUS_MOVING\n 
self.loiter_end_time = 0\n self.distance_unit = self.bot.config.distance_unit\n self.append_unit = False\n\n if self.path_start_mode == 'closest':\n self.ptr = self.find_closest_point_idx(self.points)\n\n else:\n self.ptr = 0\n\n def _process_config(self):\n self.path_file = self.config.get(\"path_file\", None)\n self.path_mode = self.config.get(\"path_mode\", \"linear\")\n self.path_start_mode = self.config.get(\"path_start_mode\", \"first\")\n self.number_lap_max = self.config.get(\"number_lap\", -1) # if < 0, then the number is inf.\n self.timer_restart_min = getSeconds(self.config.get(\"timer_restart_min\", \"00:20:00\"))\n self.timer_restart_max = getSeconds(self.config.get(\"timer_restart_max\", \"02:00:00\"))\n self.walker = self.config.get('walker', 'StepWalker')\n\n if self.timer_restart_min > self.timer_restart_max:\n raise ValueError('path timer_restart_min is bigger than path timer_restart_max') #TODO there must be a more elegant way to do it...\n \n #var not related to configs\n self.number_lap = 0\n \n def load_path(self):\n if self.path_file is None:\n raise RuntimeError('You need to specify a path file (json or gpx)')\n\n if self.path_file.endswith('.json'):\n return self.load_json()\n elif self.path_file.endswith('.gpx'):\n return self.load_gpx()\n\n def load_json(self):\n with open(self.path_file) as data_file:\n points=json.load(data_file)\n # Replace Verbal Location with lat&lng.\n for _, point in enumerate(points):\n point_tuple = self.bot.get_pos_by_name(point['location'])\n self.emit_event(\n 'location_found',\n level='debug',\n formatted=\"Location found: {location} {position}\",\n data={\n 'location': point,\n 'position': point_tuple\n }\n )\n # Keep point['location']\n point[\"lat\"] = float(point_tuple[0])\n point[\"lng\"] = float(point_tuple[1])\n point[\"alt\"] = float(point_tuple[2])\n return points\n\n def load_gpx(self):\n gpx_file = open(self.path_file, 'r')\n gpx = gpxpy.parse(gpx_file)\n\n if len(gpx.tracks) == 0:\n raise RuntimeError('GPX file does not contain a track')\n\n points = []\n track = gpx.tracks[0]\n for segment in track.segments:\n for point in segment.points:\n points.append({\"lat\": point.latitude, \"lng\": point.longitude,\n \"alt\": point.elevation, \"location\": point.name})\n\n return points\n\n def find_closest_point_idx(self, points):\n return_idx = 0\n min_distance = float(\"inf\");\n \n for index in range(len(points)):\n point = points[index]\n lat = point['lat']\n lng = point['lng']\n\n dist = distance(\n self.bot.position[0],\n self.bot.position[1],\n lat,\n lng\n )\n\n if dist < min_distance:\n min_distance = dist\n return_idx = index\n\n return return_idx\n\n def endLaps(self):\n duration = int(uniform(self.timer_restart_min, self.timer_restart_max))\n resume = dt.now() + timedelta(seconds=duration)\n \n self.emit_event(\n 'path_lap_end',\n formatted=\"Great job, lot of calories burned! 
Taking a break now for {duration}, will resume at {resume}.\",\n data={\n 'duration': str(timedelta(seconds=duration)),\n 'resume': resume.strftime(\"%H:%M:%S\")\n }\n )\n \n self.number_lap = 0 # at the end of the break, start again\n sleep(duration)\n self.bot.login()\n\n def work(self):\n # If done or loitering allow the next task to run\n if self.status == STATUS_FINISHED:\n return WorkerResult.SUCCESS\n\n if self.status == STATUS_LOITERING and time.time() < self.loiter_end_time:\n return WorkerResult.RUNNING\n\n last_lat, last_lng, last_alt = self.bot.position\n\n point = self.points[self.ptr]\n lat = point['lat']\n lng = point['lng']\n\n if 'alt' in point:\n alt = float(point['alt'])\n else:\n alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)\n\n if self.bot.config.walk_max > 0:\n step_walker = walker_factory(self.walker,\n self.bot,\n lat,\n lng,\n alt\n )\n\n is_at_destination = False\n if step_walker.step():\n is_at_destination = True\n\n else:\n self.bot.api.set_position(lat, lng, alt)\n\n dist = distance(\n last_lat,\n last_lng,\n lat,\n lng\n )\n\n self.emit_event(\n 'position_update',\n formatted=\"Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..\",\n data={\n 'last_position': (last_lat, last_lng, last_alt),\n 'current_position': (lat, lng, alt),\n 'distance': format_dist(dist,self.distance_unit,self.append_unit),\n 'distance_unit': self.distance_unit\n }\n )\n \n if (self.bot.config.walk_min > 0 and is_at_destination) or (self.status == STATUS_LOITERING and time.time() >= self.loiter_end_time):\n if \"loiter\" in point and self.status != STATUS_LOITERING:\n self.logger.info(\"Loitering for {} seconds...\".format(point[\"loiter\"]))\n self.status = STATUS_LOITERING\n self.loiter_end_time = time.time() + point[\"loiter\"]\n return WorkerResult.RUNNING\n if (self.ptr + 1) == len(self.points):\n if self.path_mode == 'single':\n self.status = STATUS_FINISHED\n return WorkerResult.SUCCESS\n self.ptr = 0\n if self.path_mode == 'linear':\n self.points = list(reversed(self.points))\n if self.number_lap_max >= 0:\n self.number_lap+=1\n self.emit_event(\n 'path_lap_update',\n formatted=\"number lap : {number_lap} / {number_lap_max}\",\n data={\n 'number_lap': str(self.number_lap),\n 'number_lap_max': str(self.number_lap_max)\n }\n )\n if self.number_lap >= self.number_lap_max:\n self.endLaps()\n else:\n self.ptr += 1\n \n self.status = STATUS_MOVING\n return WorkerResult.RUNNING\n", "path": "pokemongo_bot/cell_workers/follow_path.py"}]} | 4,032 | 642 |
gh_patches_debug_43705 | rasdani/github-patches | git_diff | iterative__dvc-1399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle KeyboardInterrupt gracefully
Currently, when you send a `SIGINT` to a running instance of DVC, it doesn't terminate properly.
```bash
dvc run 'sleep 999s'
# Ctrl+C
# or
# pkill -INT dvc
```
Error:
```
Traceback (most recent call last):
File "/home/mroutis/src/iterative/dvc/.venv/bin/dvc", line 11, in <module>
load_entry_point('dvc', 'console_scripts', 'dvc')()
File "/home/mroutis/src/iterative/dvc/dvc/main.py", line 22, in main
ret = cmd.run_cmd()
File "/home/mroutis/src/iterative/dvc/dvc/command/base.py", line 41, in run_cmd
return self.run()
File "/home/mroutis/src/iterative/dvc/dvc/command/run.py", line 34, in run
no_exec=self.args.no_exec)
File "/home/mroutis/src/iterative/dvc/dvc/project.py", line 355, in run
stage.run()
File "/home/mroutis/src/iterative/dvc/dvc/stage.py", line 488, in run
self._run()
File "/home/mroutis/src/iterative/dvc/dvc/stage.py", line 460, in _run
p.communicate()
File "/usr/lib/python3.7/subprocess.py", line 925, in communicate
self.wait()
File "/usr/lib/python3.7/subprocess.py", line 984, in wait
return self._wait(timeout=timeout)
File "/usr/lib/python3.7/subprocess.py", line 1618, in _wait
(pid, sts) = self._try_wait(0)
File "/usr/lib/python3.7/subprocess.py", line 1576, in _try_wait
(pid, sts) = os.waitpid(self.pid, wait_flags)
KeyboardInterrupt
```
</issue>
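For reference, the behaviour this issue asks for is usually achieved by catching `KeyboardInterrupt` around the command dispatch in the CLI entry point and turning it into a dedicated exit code (the patch further below in this row does exactly that, returning 252). The sketch below is a generic, self-contained illustration of the pattern — it is not DVC's actual code, and `long_running_command` is only a stand-in for real work:

```python
import sys
import time


def long_running_command() -> int:
    """Stand-in for whatever work the CLI normally performs."""
    time.sleep(999)
    return 0


def main() -> int:
    try:
        return long_running_command()
    except KeyboardInterrupt:
        # SIGINT reaches the main thread as KeyboardInterrupt; report it
        # briefly instead of letting the full traceback propagate.
        print("Interrupted by the user", file=sys.stderr)
        return 252  # dedicated non-zero exit code for user interrupts


if __name__ == "__main__":
    sys.exit(main())
```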
<code>
[start of dvc/logger.py]
1 import sys
2 import logging
3 import colorama
4 import traceback
5 import re
6
7
8 colorama.init()
9
10
11 def visual_width(line):
12 """ Get the the number of columns required to display a string """
13
14 return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))
15
16
17 def visual_center(line, width):
18 """ Center align string according to it's visual width """
19
20 spaces = max(width - visual_width(line), 0)
21 left_padding = int(spaces / 2)
22 right_padding = (spaces - left_padding)
23
24 return (left_padding * ' ') + line + (right_padding * ' ')
25
26
27 class Logger(object):
28 FMT = '%(message)s'
29 DEFAULT_LEVEL = logging.INFO
30
31 LEVEL_MAP = {
32 'debug': logging.DEBUG,
33 'info': logging.INFO,
34 'warn': logging.WARNING,
35 'error': logging.ERROR
36 }
37
38 COLOR_MAP = {
39 'green': colorama.Fore.GREEN,
40 'yellow': colorama.Fore.YELLOW,
41 'blue': colorama.Fore.BLUE,
42 'red': colorama.Fore.RED,
43 }
44
45 LEVEL_COLOR_MAP = {
46 'debug': 'blue',
47 'warn': 'yellow',
48 'error': 'red',
49 }
50
51 def __init__(self, loglevel=None):
52 if loglevel:
53 Logger.set_level(loglevel)
54
55 @staticmethod
56 def init():
57
58 class LogLevelFilter(logging.Filter):
59 def filter(self, record):
60 return record.levelno <= logging.WARNING
61
62 sh_out = logging.StreamHandler(sys.stdout)
63 sh_out.setFormatter(logging.Formatter(Logger.FMT))
64 sh_out.setLevel(logging.DEBUG)
65 sh_out.addFilter(LogLevelFilter())
66
67 sh_err = logging.StreamHandler(sys.stderr)
68 sh_err.setFormatter(logging.Formatter(Logger.FMT))
69 sh_err.setLevel(logging.ERROR)
70
71 Logger.logger().addHandler(sh_out)
72 Logger.logger().addHandler(sh_err)
73 Logger.set_level()
74
75 @staticmethod
76 def logger():
77 return logging.getLogger('dvc')
78
79 @staticmethod
80 def set_level(level=None):
81 if not level:
82 lvl = Logger.DEFAULT_LEVEL
83 else:
84 lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)
85 Logger.logger().setLevel(lvl)
86
87 @staticmethod
88 def be_quiet():
89 Logger.logger().setLevel(logging.CRITICAL)
90
91 @staticmethod
92 def be_verbose():
93 Logger.logger().setLevel(logging.DEBUG)
94
95 @staticmethod
96 def colorize(msg, color):
97 header = ''
98 footer = ''
99
100 if sys.stdout.isatty(): # pragma: no cover
101 header = Logger.COLOR_MAP.get(color.lower(), '')
102 footer = colorama.Style.RESET_ALL
103
104 return u'{}{}{}'.format(header, msg, footer)
105
106 @staticmethod
107 def parse_exc(exc, tb=None):
108 str_tb = tb if tb else None
109 str_exc = ': {}'.format(str(exc)) if exc else ""
110
111 if exc and hasattr(exc, 'cause') and exc.cause:
112 cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None
113 cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)
114
115 str_tb = cause_str_tb
116 str_exc = '{}{}'.format(str_exc, cause_str_exc)
117
118 return (str_exc, str_tb)
119
120 @staticmethod
121 def _prefix(msg, typ):
122 color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')
123 return Logger.colorize('{}: '.format(msg), color)
124
125 @staticmethod
126 def error_prefix():
127 return Logger._prefix('Error', 'error')
128
129 @staticmethod
130 def warning_prefix():
131 return Logger._prefix('Warning', 'warn')
132
133 @staticmethod
134 def debug_prefix():
135 return Logger._prefix('Debug', 'debug')
136
137 @staticmethod
138 def _with_progress(func, msg):
139 from dvc.progress import progress
140 with progress:
141 func(msg)
142
143 @staticmethod
144 def _error_exc(exc):
145 if exc is None:
146 return
147
148 if Logger.logger().getEffectiveLevel() != logging.DEBUG:
149 return
150
151 prefix = Logger.error_prefix()
152 str_exc, str_tb = Logger.parse_exc(exc)
153 str_tb = str_tb if str_tb else traceback.format_exc()
154 Logger._with_progress(Logger.logger().error, prefix + str_tb)
155
156 @staticmethod
157 def _with_exc(func, msg, suffix="", exc=None):
158 Logger._error_exc(exc)
159 msg = msg + Logger.parse_exc(exc)[0] + suffix
160 Logger._with_progress(func, msg)
161
162 @staticmethod
163 def error(msg, exc=None):
164 chat = "\n\nHaving any troubles? Hit us up at dvc.org/support, " \
165 "we are always happy to help!"
166 Logger._with_exc(Logger.logger().error,
167 Logger.error_prefix() + msg,
168 suffix=chat,
169 exc=exc)
170
171 @classmethod
172 def warn(cls, msg, exc=None):
173 cls._with_exc(cls.logger().warning,
174 cls.warning_prefix() + msg,
175 exc=exc)
176
177 @classmethod
178 def debug(cls, msg, exc=None):
179 cls._with_exc(cls.logger().debug,
180 cls.debug_prefix() + msg,
181 exc=exc)
182
183 @staticmethod
184 def info(msg):
185 Logger._with_progress(Logger.logger().info, msg)
186
187 @staticmethod
188 def is_quiet():
189 return Logger.logger().level == logging.CRITICAL
190
191 @classmethod
192 def box(cls, msg, border_color=''):
193 if cls.is_quiet():
194 return
195
196 lines = msg.split('\n')
197 max_width = max(visual_width(line) for line in lines)
198
199 padding_horizontal = 5
200 padding_vertical = 1
201
202 box_size_horizontal = (max_width + (padding_horizontal * 2))
203
204 chars = {
205 'corner': '+',
206 'horizontal': '-',
207 'vertical': '|',
208 'empty': ' ',
209 }
210
211 margin = "{corner}{line}{corner}\n".format(
212 corner=chars['corner'],
213 line=chars['horizontal'] * box_size_horizontal,
214 )
215
216 padding_lines = [
217 "{border}{space}{border}\n".format(
218 border=cls.colorize(chars['vertical'], border_color),
219 space=chars['empty'] * box_size_horizontal,
220 ) * padding_vertical
221 ]
222
223 content_lines = [
224 "{border}{space}{content}{space}{border}\n".format(
225 border=cls.colorize(chars['vertical'], border_color),
226 space=chars['empty'] * padding_horizontal,
227 content=visual_center(line, max_width),
228 ) for line in lines
229 ]
230
231 box = "{margin}{padding}{content}{padding}{margin}".format(
232 margin=cls.colorize(margin, border_color),
233 padding=''.join(padding_lines),
234 content=''.join(content_lines),
235 )
236
237 print(box)
238
[end of dvc/logger.py]
[start of dvc/main.py]
1 from dvc.logger import Logger
2 from dvc.cli import parse_args
3 from dvc.command.base import CmdBase
4 from dvc.analytics import Analytics
5 from dvc.exceptions import NotDvcProjectError, DvcParserError
6
7
8 def main(argv=None):
9 Logger.init()
10
11 args = None
12 cmd = None
13 try:
14 args = parse_args(argv)
15
16 # Init loglevel early in case we'll run
17 # into errors before setting it properly
18 CmdBase._set_loglevel(args)
19
20 cmd = args.func(args)
21
22 ret = cmd.run_cmd()
23 except NotDvcProjectError as ex:
24 Logger.error(str(ex))
25 ret = 253
26 except DvcParserError:
27 ret = 254
28 except Exception as ex:
29 Logger.error('Unexpected error', ex)
30 ret = 255
31
32 Analytics().send_cmd(cmd, args, ret)
33
34 return ret
35
[end of dvc/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/logger.py b/dvc/logger.py
--- a/dvc/logger.py
+++ b/dvc/logger.py
@@ -106,21 +106,26 @@
@staticmethod
def parse_exc(exc, tb=None):
str_tb = tb if tb else None
- str_exc = ': {}'.format(str(exc)) if exc else ""
+ str_exc = str(exc) if exc else ""
+ l_str_exc = []
+
+ if len(str_exc) != 0:
+ l_str_exc.append(str_exc)
if exc and hasattr(exc, 'cause') and exc.cause:
cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None
- cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)
+ l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,
+ cause_tb)
str_tb = cause_str_tb
- str_exc = '{}{}'.format(str_exc, cause_str_exc)
+ l_str_exc += l_cause_str_exc
- return (str_exc, str_tb)
+ return (l_str_exc, str_tb)
@staticmethod
def _prefix(msg, typ):
color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')
- return Logger.colorize('{}: '.format(msg), color)
+ return Logger.colorize('{}'.format(msg), color)
@staticmethod
def error_prefix():
@@ -141,43 +146,42 @@
func(msg)
@staticmethod
- def _error_exc(exc):
- if exc is None:
- return
+ def _with_exc(func, prefix, msg, suffix="", exc=None):
+ l_str_exc, str_tb = Logger.parse_exc(exc)
- if Logger.logger().getEffectiveLevel() != logging.DEBUG:
- return
+ if exc is not None and Logger.is_verbose():
+ str_tb = str_tb if str_tb else traceback.format_exc()
+ Logger._with_progress(Logger.logger().error, str_tb)
- prefix = Logger.error_prefix()
- str_exc, str_tb = Logger.parse_exc(exc)
- str_tb = str_tb if str_tb else traceback.format_exc()
- Logger._with_progress(Logger.logger().error, prefix + str_tb)
+ l_msg = [prefix]
+ if msg is not None and len(msg) != 0:
+ l_msg.append(msg)
+ l_msg += l_str_exc
- @staticmethod
- def _with_exc(func, msg, suffix="", exc=None):
- Logger._error_exc(exc)
- msg = msg + Logger.parse_exc(exc)[0] + suffix
- Logger._with_progress(func, msg)
+ Logger._with_progress(func, ': '.join(l_msg) + suffix)
@staticmethod
def error(msg, exc=None):
chat = "\n\nHaving any troubles? Hit us up at dvc.org/support, " \
"we are always happy to help!"
Logger._with_exc(Logger.logger().error,
- Logger.error_prefix() + msg,
+ Logger.error_prefix(),
+ msg,
suffix=chat,
exc=exc)
@classmethod
def warn(cls, msg, exc=None):
cls._with_exc(cls.logger().warning,
- cls.warning_prefix() + msg,
+ cls.warning_prefix(),
+ msg,
exc=exc)
@classmethod
def debug(cls, msg, exc=None):
cls._with_exc(cls.logger().debug,
- cls.debug_prefix() + msg,
+ cls.debug_prefix(),
+ msg,
exc=exc)
@staticmethod
@@ -188,6 +192,10 @@
def is_quiet():
return Logger.logger().level == logging.CRITICAL
+ @staticmethod
+ def is_verbose():
+ return Logger.logger().getEffectiveLevel() == logging.DEBUG
+
@classmethod
def box(cls, msg, border_color=''):
if cls.is_quiet():
diff --git a/dvc/main.py b/dvc/main.py
--- a/dvc/main.py
+++ b/dvc/main.py
@@ -20,8 +20,11 @@
cmd = args.func(args)
ret = cmd.run_cmd()
+ except KeyboardInterrupt as ex:
+ Logger.error("Interrupted by the user", ex)
+ ret = 252
except NotDvcProjectError as ex:
- Logger.error(str(ex))
+ Logger.error("", ex)
ret = 253
except DvcParserError:
ret = 254
| {"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -106,21 +106,26 @@\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n- str_exc = ': {}'.format(str(exc)) if exc else \"\"\n+ str_exc = str(exc) if exc else \"\"\n+ l_str_exc = []\n+\n+ if len(str_exc) != 0:\n+ l_str_exc.append(str_exc)\n \n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n- cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)\n+ l_cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause,\n+ cause_tb)\n \n str_tb = cause_str_tb\n- str_exc = '{}{}'.format(str_exc, cause_str_exc)\n+ l_str_exc += l_cause_str_exc\n \n- return (str_exc, str_tb)\n+ return (l_str_exc, str_tb)\n \n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n- return Logger.colorize('{}: '.format(msg), color)\n+ return Logger.colorize('{}'.format(msg), color)\n \n @staticmethod\n def error_prefix():\n@@ -141,43 +146,42 @@\n func(msg)\n \n @staticmethod\n- def _error_exc(exc):\n- if exc is None:\n- return\n+ def _with_exc(func, prefix, msg, suffix=\"\", exc=None):\n+ l_str_exc, str_tb = Logger.parse_exc(exc)\n \n- if Logger.logger().getEffectiveLevel() != logging.DEBUG:\n- return\n+ if exc is not None and Logger.is_verbose():\n+ str_tb = str_tb if str_tb else traceback.format_exc()\n+ Logger._with_progress(Logger.logger().error, str_tb)\n \n- prefix = Logger.error_prefix()\n- str_exc, str_tb = Logger.parse_exc(exc)\n- str_tb = str_tb if str_tb else traceback.format_exc()\n- Logger._with_progress(Logger.logger().error, prefix + str_tb)\n+ l_msg = [prefix]\n+ if msg is not None and len(msg) != 0:\n+ l_msg.append(msg)\n+ l_msg += l_str_exc\n \n- @staticmethod\n- def _with_exc(func, msg, suffix=\"\", exc=None):\n- Logger._error_exc(exc)\n- msg = msg + Logger.parse_exc(exc)[0] + suffix\n- Logger._with_progress(func, msg)\n+ Logger._with_progress(func, ': '.join(l_msg) + suffix)\n \n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n- Logger.error_prefix() + msg,\n+ Logger.error_prefix(),\n+ msg,\n suffix=chat,\n exc=exc)\n \n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n- cls.warning_prefix() + msg,\n+ cls.warning_prefix(),\n+ msg,\n exc=exc)\n \n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n- cls.debug_prefix() + msg,\n+ cls.debug_prefix(),\n+ msg,\n exc=exc)\n \n @staticmethod\n@@ -188,6 +192,10 @@\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n \n+ @staticmethod\n+ def is_verbose():\n+ return Logger.logger().getEffectiveLevel() == logging.DEBUG\n+\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\ndiff --git a/dvc/main.py b/dvc/main.py\n--- a/dvc/main.py\n+++ b/dvc/main.py\n@@ -20,8 +20,11 @@\n cmd = args.func(args)\n \n ret = cmd.run_cmd()\n+ except KeyboardInterrupt as ex:\n+ Logger.error(\"Interrupted by the user\", ex)\n+ ret = 252\n except NotDvcProjectError as ex:\n- Logger.error(str(ex))\n+ Logger.error(\"\", ex)\n ret = 253\n except DvcParserError:\n ret = 254\n", "issue": "Handle KeyboardInterrupt gracefully\nCurrently, when you send a `SIGINT` to a running instance of DVC, it doesn't terminate properly.\r\n\r\n```bash\r\ndvc run 'sleep 999s'\r\n# Ctrl+C\r\n# or\r\n# pkill -INT dvc\r\n```\r\n\r\nError:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mroutis/src/iterative/dvc/.venv/bin/dvc\", line 11, in <module>\r\n load_entry_point('dvc', 'console_scripts', 'dvc')()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/main.py\", line 22, in main\r\n ret = cmd.run_cmd()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/command/base.py\", line 41, in run_cmd\r\n return self.run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/command/run.py\", line 34, in run\r\n no_exec=self.args.no_exec)\r\n File \"/home/mroutis/src/iterative/dvc/dvc/project.py\", line 355, in run\r\n stage.run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/stage.py\", line 488, in run\r\n self._run()\r\n File \"/home/mroutis/src/iterative/dvc/dvc/stage.py\", line 460, in _run\r\n p.communicate()\r\n File \"/usr/lib/python3.7/subprocess.py\", line 925, in communicate\r\n self.wait()\r\n File \"/usr/lib/python3.7/subprocess.py\", line 984, in wait\r\n return self._wait(timeout=timeout)\r\n File \"/usr/lib/python3.7/subprocess.py\", line 1618, in _wait\r\n (pid, sts) = self._try_wait(0)\r\n File \"/usr/lib/python3.7/subprocess.py\", line 1576, in _try_wait\r\n (pid, sts) = os.waitpid(self.pid, wait_flags)\r\nKeyboardInterrupt\r\n```\r\n\r\n\n", "before_files": [{"content": "import sys\nimport logging\nimport colorama\nimport traceback\nimport re\n\n\ncolorama.init()\n\n\ndef visual_width(line):\n \"\"\" Get the the number of columns required to display a string \"\"\"\n\n return len(re.sub(colorama.ansitowin32.AnsiToWin32.ANSI_CSI_RE, '', line))\n\n\ndef visual_center(line, width):\n \"\"\" Center align string according to it's visual width \"\"\"\n\n spaces = max(width - visual_width(line), 0)\n left_padding = int(spaces / 2)\n right_padding = (spaces - left_padding)\n\n return (left_padding * ' ') + line + (right_padding * ' ')\n\n\nclass Logger(object):\n FMT = '%(message)s'\n DEFAULT_LEVEL = logging.INFO\n\n LEVEL_MAP = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warn': logging.WARNING,\n 'error': logging.ERROR\n }\n\n COLOR_MAP = {\n 'green': colorama.Fore.GREEN,\n 'yellow': 
colorama.Fore.YELLOW,\n 'blue': colorama.Fore.BLUE,\n 'red': colorama.Fore.RED,\n }\n\n LEVEL_COLOR_MAP = {\n 'debug': 'blue',\n 'warn': 'yellow',\n 'error': 'red',\n }\n\n def __init__(self, loglevel=None):\n if loglevel:\n Logger.set_level(loglevel)\n\n @staticmethod\n def init():\n\n class LogLevelFilter(logging.Filter):\n def filter(self, record):\n return record.levelno <= logging.WARNING\n\n sh_out = logging.StreamHandler(sys.stdout)\n sh_out.setFormatter(logging.Formatter(Logger.FMT))\n sh_out.setLevel(logging.DEBUG)\n sh_out.addFilter(LogLevelFilter())\n\n sh_err = logging.StreamHandler(sys.stderr)\n sh_err.setFormatter(logging.Formatter(Logger.FMT))\n sh_err.setLevel(logging.ERROR)\n\n Logger.logger().addHandler(sh_out)\n Logger.logger().addHandler(sh_err)\n Logger.set_level()\n\n @staticmethod\n def logger():\n return logging.getLogger('dvc')\n\n @staticmethod\n def set_level(level=None):\n if not level:\n lvl = Logger.DEFAULT_LEVEL\n else:\n lvl = Logger.LEVEL_MAP.get(level.lower(), Logger.DEFAULT_LEVEL)\n Logger.logger().setLevel(lvl)\n\n @staticmethod\n def be_quiet():\n Logger.logger().setLevel(logging.CRITICAL)\n\n @staticmethod\n def be_verbose():\n Logger.logger().setLevel(logging.DEBUG)\n\n @staticmethod\n def colorize(msg, color):\n header = ''\n footer = ''\n\n if sys.stdout.isatty(): # pragma: no cover\n header = Logger.COLOR_MAP.get(color.lower(), '')\n footer = colorama.Style.RESET_ALL\n\n return u'{}{}{}'.format(header, msg, footer)\n\n @staticmethod\n def parse_exc(exc, tb=None):\n str_tb = tb if tb else None\n str_exc = ': {}'.format(str(exc)) if exc else \"\"\n\n if exc and hasattr(exc, 'cause') and exc.cause:\n cause_tb = exc.cause_tb if hasattr(exc, 'cause_tb') else None\n cause_str_exc, cause_str_tb = Logger.parse_exc(exc.cause, cause_tb)\n\n str_tb = cause_str_tb\n str_exc = '{}{}'.format(str_exc, cause_str_exc)\n\n return (str_exc, str_tb)\n\n @staticmethod\n def _prefix(msg, typ):\n color = Logger.LEVEL_COLOR_MAP.get(typ.lower(), '')\n return Logger.colorize('{}: '.format(msg), color)\n\n @staticmethod\n def error_prefix():\n return Logger._prefix('Error', 'error')\n\n @staticmethod\n def warning_prefix():\n return Logger._prefix('Warning', 'warn')\n\n @staticmethod\n def debug_prefix():\n return Logger._prefix('Debug', 'debug')\n\n @staticmethod\n def _with_progress(func, msg):\n from dvc.progress import progress\n with progress:\n func(msg)\n\n @staticmethod\n def _error_exc(exc):\n if exc is None:\n return\n\n if Logger.logger().getEffectiveLevel() != logging.DEBUG:\n return\n\n prefix = Logger.error_prefix()\n str_exc, str_tb = Logger.parse_exc(exc)\n str_tb = str_tb if str_tb else traceback.format_exc()\n Logger._with_progress(Logger.logger().error, prefix + str_tb)\n\n @staticmethod\n def _with_exc(func, msg, suffix=\"\", exc=None):\n Logger._error_exc(exc)\n msg = msg + Logger.parse_exc(exc)[0] + suffix\n Logger._with_progress(func, msg)\n\n @staticmethod\n def error(msg, exc=None):\n chat = \"\\n\\nHaving any troubles? 
Hit us up at dvc.org/support, \" \\\n \"we are always happy to help!\"\n Logger._with_exc(Logger.logger().error,\n Logger.error_prefix() + msg,\n suffix=chat,\n exc=exc)\n\n @classmethod\n def warn(cls, msg, exc=None):\n cls._with_exc(cls.logger().warning,\n cls.warning_prefix() + msg,\n exc=exc)\n\n @classmethod\n def debug(cls, msg, exc=None):\n cls._with_exc(cls.logger().debug,\n cls.debug_prefix() + msg,\n exc=exc)\n\n @staticmethod\n def info(msg):\n Logger._with_progress(Logger.logger().info, msg)\n\n @staticmethod\n def is_quiet():\n return Logger.logger().level == logging.CRITICAL\n\n @classmethod\n def box(cls, msg, border_color=''):\n if cls.is_quiet():\n return\n\n lines = msg.split('\\n')\n max_width = max(visual_width(line) for line in lines)\n\n padding_horizontal = 5\n padding_vertical = 1\n\n box_size_horizontal = (max_width + (padding_horizontal * 2))\n\n chars = {\n 'corner': '+',\n 'horizontal': '-',\n 'vertical': '|',\n 'empty': ' ',\n }\n\n margin = \"{corner}{line}{corner}\\n\".format(\n corner=chars['corner'],\n line=chars['horizontal'] * box_size_horizontal,\n )\n\n padding_lines = [\n \"{border}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * box_size_horizontal,\n ) * padding_vertical\n ]\n\n content_lines = [\n \"{border}{space}{content}{space}{border}\\n\".format(\n border=cls.colorize(chars['vertical'], border_color),\n space=chars['empty'] * padding_horizontal,\n content=visual_center(line, max_width),\n ) for line in lines\n ]\n\n box = \"{margin}{padding}{content}{padding}{margin}\".format(\n margin=cls.colorize(margin, border_color),\n padding=''.join(padding_lines),\n content=''.join(content_lines),\n )\n\n print(box)\n", "path": "dvc/logger.py"}, {"content": "from dvc.logger import Logger\nfrom dvc.cli import parse_args\nfrom dvc.command.base import CmdBase\nfrom dvc.analytics import Analytics\nfrom dvc.exceptions import NotDvcProjectError, DvcParserError\n\n\ndef main(argv=None):\n Logger.init()\n\n args = None\n cmd = None\n try:\n args = parse_args(argv)\n\n # Init loglevel early in case we'll run\n # into errors before setting it properly\n CmdBase._set_loglevel(args)\n\n cmd = args.func(args)\n\n ret = cmd.run_cmd()\n except NotDvcProjectError as ex:\n Logger.error(str(ex))\n ret = 253\n except DvcParserError:\n ret = 254\n except Exception as ex:\n Logger.error('Unexpected error', ex)\n ret = 255\n\n Analytics().send_cmd(cmd, args, ret)\n\n return ret\n", "path": "dvc/main.py"}]} | 3,398 | 1,015 |
gh_patches_debug_22103 | rasdani/github-patches | git_diff | open-mmlab__mmengine-262 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After a user has set up `paramwise_cfg`, how can they tell whether the result matches their expectations? Should a corresponding script also be provided so that, after running it, the user can easily see which parameters are frozen and how the hyper-parameters differ between parameter groups? If there is no time to develop this now, it can be kept as a future feature request.
After a user has finished configuring it, how can they tell whether the result matches their expectations? Should a corresponding script also be provided so that, after running it, the user can easily see which parameters are frozen and how the hyper-parameters differ between parameter groups? If there is no time to develop this now, it can be kept as a future feature request.
_Originally posted by @hhaAndroid in https://github.com/open-mmlab/mmengine/pull/25#discussion_r809786760_
</issue>
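In other words, the request is for a way to verify, after `paramwise_cfg` has been applied, which parameters ended up frozen and which learning rate / weight decay each parameter group actually received. The eventual patch shown further below logs each parameter's overridden options while the optimizer is being built; independent of that, the result can also be inspected after construction. The following is a minimal, generic sketch — `summarize_param_groups` is not an mmengine API, just an illustration in plain PyTorch:

```python
import torch
import torch.nn as nn


def summarize_param_groups(model: nn.Module, optimizer: torch.optim.Optimizer) -> None:
    """Print frozen parameters and the hyper-parameters of every param group."""
    for name, param in model.named_parameters():
        if not param.requires_grad:
            print(f"frozen: {name}")
    for idx, group in enumerate(optimizer.param_groups):
        hypers = {k: v for k, v in group.items() if k != "params"}
        print(f"group {idx}: {len(group['params'])} tensors, {hypers}")


# Toy usage: freeze the norm layer, then inspect what the optimizer sees.
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
for p in model[1].parameters():
    p.requires_grad = False
optimizer = torch.optim.SGD(
    [{"params": model[0].parameters(), "lr": 0.01, "weight_decay": 1e-4}],
    lr=0.01,
)
summarize_param_groups(model, optimizer)
```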
<code>
[start of mmengine/optim/optimizer/default_constructor.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import warnings
3 from typing import List, Optional, Union
4
5 import torch
6 import torch.nn as nn
7 from torch.nn import GroupNorm, LayerNorm
8
9 from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
10 from mmengine.utils import is_list_of, mmcv_full_available
11 from mmengine.utils.parrots_wrapper import _BatchNorm, _InstanceNorm
12
13
14 @OPTIMIZER_CONSTRUCTORS.register_module()
15 class DefaultOptimizerConstructor:
16 """Default constructor for optimizers.
17
18 By default each parameter share the same optimizer settings, and we
19 provide an argument ``paramwise_cfg`` to specify parameter-wise settings.
20 It is a dict and may contain the following fields:
21
22 - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If
23 one of the keys in ``custom_keys`` is a substring of the name of one
24 parameter, then the setting of the parameter will be specified by
25 ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will
26 be ignored. It should be noted that the aforementioned ``key`` is the
27 longest key that is a substring of the name of the parameter. If there
28 are multiple matched keys with the same length, then the key with lower
29 alphabet order will be chosen.
30 ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``
31 and ``decay_mult``. See Example 2 below.
32 - ``bias_lr_mult`` (float): It will be multiplied to the learning
33 rate for all bias parameters (except for those in normalization
34 layers and offset layers of DCN).
35 - ``bias_decay_mult`` (float): It will be multiplied to the weight
36 decay for all bias parameters (except for those in
37 normalization layers, depthwise conv layers, offset layers of DCN).
38 - ``norm_decay_mult`` (float): It will be multiplied to the weight
39 decay for all weight and bias parameters of normalization
40 layers.
41 - ``dwconv_decay_mult`` (float): It will be multiplied to the weight
42 decay for all weight and bias parameters of depthwise conv
43 layers.
44 - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning
45 rate for parameters of offset layer in the deformable convs
46 of a model.
47 - ``bypass_duplicate`` (bool): If true, the duplicate parameters
48 would not be added into optimizer. Default: False.
49
50 Note:
51
52 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will
53 override the effect of ``bias_lr_mult`` in the bias of offset layer.
54 So be careful when using both ``bias_lr_mult`` and
55 ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset
56 layer in deformable convs, set ``dcn_offset_lr_mult`` to the original
57 ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
58
59 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
60 apply it to all the DCN layers in the model. So be careful when the
61 model contains multiple DCN layers in places other than backbone.
62
63 Args:
64 optimizer_cfg (dict): The config dict of the optimizer.
65 Positional fields are
66
67 - `type`: class name of the optimizer.
68
69 Optional fields are
70
71 - any arguments of the corresponding optimizer type, e.g.,
72 lr, weight_decay, momentum, etc.
73 paramwise_cfg (dict, optional): Parameter-wise options.
74
75 Example 1:
76 >>> model = torch.nn.modules.Conv1d(1, 1, 1)
77 >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
78 >>> weight_decay=0.0001)
79 >>> paramwise_cfg = dict(norm_decay_mult=0.)
80 >>> optim_builder = DefaultOptimizerConstructor(
81 >>> optimizer_cfg, paramwise_cfg)
82 >>> optimizer = optim_builder(model)
83
84 Example 2:
85 >>> # assume model have attribute model.backbone and model.cls_head
86 >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)
87 >>> paramwise_cfg = dict(custom_keys={
88 '.backbone': dict(lr_mult=0.1, decay_mult=0.9)})
89 >>> optim_builder = DefaultOptimizerConstructor(
90 >>> optimizer_cfg, paramwise_cfg)
91 >>> optimizer = optim_builder(model)
92 >>> # Then the `lr` and `weight_decay` for model.backbone is
93 >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for
94 >>> # model.cls_head is (0.01, 0.95).
95 """
96
97 def __init__(self,
98 optimizer_cfg: dict,
99 paramwise_cfg: Optional[dict] = None):
100 if not isinstance(optimizer_cfg, dict):
101 raise TypeError('optimizer_cfg should be a dict',
102 f'but got {type(optimizer_cfg)}')
103 self.optimizer_cfg = optimizer_cfg
104 self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
105 self.base_lr = optimizer_cfg.get('lr', None)
106 self.base_wd = optimizer_cfg.get('weight_decay', None)
107 self._validate_cfg()
108
109 def _validate_cfg(self) -> None:
110 """verify the correctness of the config."""
111 if not isinstance(self.paramwise_cfg, dict):
112 raise TypeError('paramwise_cfg should be None or a dict, '
113 f'but got {type(self.paramwise_cfg)}')
114
115 if 'custom_keys' in self.paramwise_cfg:
116 if not isinstance(self.paramwise_cfg['custom_keys'], dict):
117 raise TypeError(
118 'If specified, custom_keys must be a dict, '
119 f'but got {type(self.paramwise_cfg["custom_keys"])}')
120 if self.base_wd is None:
121 for key in self.paramwise_cfg['custom_keys']:
122 if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:
123 raise ValueError('base_wd should not be None')
124
125 # get base lr and weight decay
126 # weight_decay must be explicitly specified if mult is specified
127 if ('bias_decay_mult' in self.paramwise_cfg
128 or 'norm_decay_mult' in self.paramwise_cfg
129 or 'dwconv_decay_mult' in self.paramwise_cfg):
130 if self.base_wd is None:
131 raise ValueError('base_wd should not be None')
132
133 def _is_in(self, param_group: dict, param_group_list: list) -> bool:
134 """check whether the `param_group` is in the`param_group_list`"""
135 assert is_list_of(param_group_list, dict)
136 param = set(param_group['params'])
137 param_set = set()
138 for group in param_group_list:
139 param_set.update(set(group['params']))
140
141 return not param.isdisjoint(param_set)
142
143 def add_params(self,
144 params: List[dict],
145 module: nn.Module,
146 prefix: str = '',
147 is_dcn_module: Optional[Union[int, float]] = None) -> None:
148 """Add all parameters of module to the params list.
149
150 The parameters of the given module will be added to the list of param
151 groups, with specific rules defined by paramwise_cfg.
152
153 Args:
154 params (list[dict]): A list of param groups, it will be modified
155 in place.
156 module (nn.Module): The module to be added.
157 prefix (str): The prefix of the module
158 is_dcn_module (int|float|None): If the current module is a
159 submodule of DCN, `is_dcn_module` will be passed to
160 control conv_offset layer's learning rate. Defaults to None.
161 """
162 # get param-wise options
163 custom_keys = self.paramwise_cfg.get('custom_keys', {})
164 # first sort with alphabet order and then sort with reversed len of str
165 sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
166
167 bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)
168 bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)
169 norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)
170 dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)
171 bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)
172 dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.)
173
174 # special rules for norm layers and depth-wise conv layers
175 is_norm = isinstance(module,
176 (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
177 is_dwconv = (
178 isinstance(module, torch.nn.Conv2d)
179 and module.in_channels == module.groups)
180
181 for name, param in module.named_parameters(recurse=False):
182 param_group = {'params': [param]}
183 if not param.requires_grad:
184 params.append(param_group)
185 continue
186 if bypass_duplicate and self._is_in(param_group, params):
187 warnings.warn(f'{prefix} is duplicate. It is skipped since '
188 f'bypass_duplicate={bypass_duplicate}')
189 continue
190 # if the parameter match one of the custom keys, ignore other rules
191 is_custom = False
192 for key in sorted_keys:
193 if key in f'{prefix}.{name}':
194 is_custom = True
195 lr_mult = custom_keys[key].get('lr_mult', 1.)
196 param_group['lr'] = self.base_lr * lr_mult
197 if self.base_wd is not None:
198 decay_mult = custom_keys[key].get('decay_mult', 1.)
199 param_group['weight_decay'] = self.base_wd * decay_mult
200 break
201
202 if not is_custom:
203 # bias_lr_mult affects all bias parameters
204 # except for norm.bias dcn.conv_offset.bias
205 if name == 'bias' and not (is_norm or is_dcn_module):
206 param_group['lr'] = self.base_lr * bias_lr_mult
207
208 if (prefix.find('conv_offset') != -1 and is_dcn_module
209 and isinstance(module, torch.nn.Conv2d)):
210 # deal with both dcn_offset's bias & weight
211 param_group['lr'] = self.base_lr * dcn_offset_lr_mult
212
213 # apply weight decay policies
214 if self.base_wd is not None:
215 # norm decay
216 if is_norm:
217 param_group[
218 'weight_decay'] = self.base_wd * norm_decay_mult
219 # depth-wise conv
220 elif is_dwconv:
221 param_group[
222 'weight_decay'] = self.base_wd * dwconv_decay_mult
223 # bias lr and decay
224 elif name == 'bias' and not is_dcn_module:
225 # TODO: current bias_decay_mult will have affect on DCN
226 param_group[
227 'weight_decay'] = self.base_wd * bias_decay_mult
228 params.append(param_group)
229
230 if mmcv_full_available():
231 from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
232 is_dcn_module = isinstance(module,
233 (DeformConv2d, ModulatedDeformConv2d))
234 else:
235 is_dcn_module = False
236 for child_name, child_mod in module.named_children():
237 child_prefix = f'{prefix}.{child_name}' if prefix else child_name
238 self.add_params(
239 params,
240 child_mod,
241 prefix=child_prefix,
242 is_dcn_module=is_dcn_module)
243
244 def __call__(self, model: nn.Module) -> torch.optim.Optimizer:
245 if hasattr(model, 'module'):
246 model = model.module
247
248 optimizer_cfg = self.optimizer_cfg.copy()
249 # if no paramwise option is specified, just use the global setting
250 if not self.paramwise_cfg:
251 optimizer_cfg['params'] = model.parameters()
252 return OPTIMIZERS.build(optimizer_cfg)
253
254 # set param-wise lr and weight decay recursively
255 params: List = []
256 self.add_params(params, model)
257 optimizer_cfg['params'] = params
258
259 return OPTIMIZERS.build(optimizer_cfg)
260
[end of mmengine/optim/optimizer/default_constructor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmengine/optim/optimizer/default_constructor.py b/mmengine/optim/optimizer/default_constructor.py
--- a/mmengine/optim/optimizer/default_constructor.py
+++ b/mmengine/optim/optimizer/default_constructor.py
@@ -5,6 +5,7 @@
import torch
import torch.nn as nn
from torch.nn import GroupNorm, LayerNorm
+from mmengine.mmengine.logging.logger import print_log
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
from mmengine.utils import is_list_of, mmcv_full_available
@@ -226,6 +227,13 @@
param_group[
'weight_decay'] = self.base_wd * bias_decay_mult
params.append(param_group)
+ for key, value in param_group.items():
+ if key == 'params':
+ continue
+ full_name = f'{prefix}.{name}' if prefix else name
+ print_log(
+ f'paramwise_options -- {full_name}:{key}={value}',
+ logger='current')
if mmcv_full_available():
from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
| {"golden_diff": "diff --git a/mmengine/optim/optimizer/default_constructor.py b/mmengine/optim/optimizer/default_constructor.py\n--- a/mmengine/optim/optimizer/default_constructor.py\n+++ b/mmengine/optim/optimizer/default_constructor.py\n@@ -5,6 +5,7 @@\n import torch\n import torch.nn as nn\n from torch.nn import GroupNorm, LayerNorm\n+from mmengine.mmengine.logging.logger import print_log\n \n from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS\n from mmengine.utils import is_list_of, mmcv_full_available\n@@ -226,6 +227,13 @@\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n+ for key, value in param_group.items():\n+ if key == 'params':\n+ continue\n+ full_name = f'{prefix}.{name}' if prefix else name\n+ print_log(\n+ f'paramwise_options -- {full_name}:{key}={value}',\n+ logger='current')\n \n if mmcv_full_available():\n from mmcv.ops import DeformConv2d, ModulatedDeformConv2d\n", "issue": "\u7528\u6237\u8bbe\u7f6e\u5b8c\u4e86paramwise_cfg\uff0c\u5982\u4f55\u77e5\u9053\u662f\u5426\u7b26\u5408\u9884\u671f\uff0c\u8fd9\u4e2a\u662f\u5426\u4e5f\u8981\u63d0\u4f9b\u76f8\u5e94\u7684\u811a\u672c\uff0c\u7528\u6237\u8fd0\u884c\u540e\u53ef\u4ee5\u5f88\u5bb9\u6613\u7684\u77e5\u9053\u54ea\u4e9b\u53c2\u6570\u88ab frozen\uff0c\u4e0d\u592a\u53c2\u6570\u7ec4\u8d85\u53c2\u7684\u4e0d\u540c\u3002\u6682\u65f6\u6ca1\u6709\u65f6\u95f4\u5f00\u53d1\u7684\u8bdd\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u672a\u6765\u4e00\u4e2a\u9700\u6c42\u5427\n\u7528\u6237\u8bbe\u7f6e\u5b8c\u4e86\uff0c\u5982\u4f55\u77e5\u9053\u662f\u5426\u7b26\u5408\u9884\u671f\uff0c\u8fd9\u4e2a\u662f\u5426\u4e5f\u8981\u63d0\u4f9b\u76f8\u5e94\u7684\u811a\u672c\uff0c\u7528\u6237\u8fd0\u884c\u540e\u53ef\u4ee5\u5f88\u5bb9\u6613\u7684\u77e5\u9053\u54ea\u4e9b\u53c2\u6570\u88ab frozen\uff0c\u4e0d\u592a\u53c2\u6570\u7ec4\u8d85\u53c2\u7684\u4e0d\u540c\u3002\u6682\u65f6\u6ca1\u6709\u65f6\u95f4\u5f00\u53d1\u7684\u8bdd\uff0c\u53ef\u4ee5\u4f5c\u4e3a\u672a\u6765\u4e00\u4e2a\u9700\u6c42\u5427\r\n\r\n_Originally posted by @hhaAndroid in https://github.com/open-mmlab/mmengine/pull/25#discussion_r809786760_\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom typing import List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import GroupNorm, LayerNorm\n\nfrom mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS\nfrom mmengine.utils import is_list_of, mmcv_full_available\nfrom mmengine.utils.parrots_wrapper import _BatchNorm, _InstanceNorm\n\n\n@OPTIMIZER_CONSTRUCTORS.register_module()\nclass DefaultOptimizerConstructor:\n \"\"\"Default constructor for optimizers.\n\n By default each parameter share the same optimizer settings, and we\n provide an argument ``paramwise_cfg`` to specify parameter-wise settings.\n It is a dict and may contain the following fields:\n\n - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If\n one of the keys in ``custom_keys`` is a substring of the name of one\n parameter, then the setting of the parameter will be specified by\n ``custom_keys[key]`` and other setting like ``bias_lr_mult`` etc. will\n be ignored. It should be noted that the aforementioned ``key`` is the\n longest key that is a substring of the name of the parameter. If there\n are multiple matched keys with the same length, then the key with lower\n alphabet order will be chosen.\n ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``\n and ``decay_mult``. 
See Example 2 below.\n - ``bias_lr_mult`` (float): It will be multiplied to the learning\n rate for all bias parameters (except for those in normalization\n layers and offset layers of DCN).\n - ``bias_decay_mult`` (float): It will be multiplied to the weight\n decay for all bias parameters (except for those in\n normalization layers, depthwise conv layers, offset layers of DCN).\n - ``norm_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of normalization\n layers.\n - ``dwconv_decay_mult`` (float): It will be multiplied to the weight\n decay for all weight and bias parameters of depthwise conv\n layers.\n - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning\n rate for parameters of offset layer in the deformable convs\n of a model.\n - ``bypass_duplicate`` (bool): If true, the duplicate parameters\n would not be added into optimizer. Default: False.\n\n Note:\n\n 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n override the effect of ``bias_lr_mult`` in the bias of offset layer.\n So be careful when using both ``bias_lr_mult`` and\n ``dcn_offset_lr_mult``. If you wish to apply both of them to the offset\n layer in deformable convs, set ``dcn_offset_lr_mult`` to the original\n ``dcn_offset_lr_mult`` * ``bias_lr_mult``.\n\n 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will\n apply it to all the DCN layers in the model. So be careful when the\n model contains multiple DCN layers in places other than backbone.\n\n Args:\n optimizer_cfg (dict): The config dict of the optimizer.\n Positional fields are\n\n - `type`: class name of the optimizer.\n\n Optional fields are\n\n - any arguments of the corresponding optimizer type, e.g.,\n lr, weight_decay, momentum, etc.\n paramwise_cfg (dict, optional): Parameter-wise options.\n\n Example 1:\n >>> model = torch.nn.modules.Conv1d(1, 1, 1)\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,\n >>> weight_decay=0.0001)\n >>> paramwise_cfg = dict(norm_decay_mult=0.)\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n\n Example 2:\n >>> # assume model have attribute model.backbone and model.cls_head\n >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)\n >>> paramwise_cfg = dict(custom_keys={\n '.backbone': dict(lr_mult=0.1, decay_mult=0.9)})\n >>> optim_builder = DefaultOptimizerConstructor(\n >>> optimizer_cfg, paramwise_cfg)\n >>> optimizer = optim_builder(model)\n >>> # Then the `lr` and `weight_decay` for model.backbone is\n >>> # (0.01 * 0.1, 0.95 * 0.9). 
`lr` and `weight_decay` for\n >>> # model.cls_head is (0.01, 0.95).\n \"\"\"\n\n def __init__(self,\n optimizer_cfg: dict,\n paramwise_cfg: Optional[dict] = None):\n if not isinstance(optimizer_cfg, dict):\n raise TypeError('optimizer_cfg should be a dict',\n f'but got {type(optimizer_cfg)}')\n self.optimizer_cfg = optimizer_cfg\n self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg\n self.base_lr = optimizer_cfg.get('lr', None)\n self.base_wd = optimizer_cfg.get('weight_decay', None)\n self._validate_cfg()\n\n def _validate_cfg(self) -> None:\n \"\"\"verify the correctness of the config.\"\"\"\n if not isinstance(self.paramwise_cfg, dict):\n raise TypeError('paramwise_cfg should be None or a dict, '\n f'but got {type(self.paramwise_cfg)}')\n\n if 'custom_keys' in self.paramwise_cfg:\n if not isinstance(self.paramwise_cfg['custom_keys'], dict):\n raise TypeError(\n 'If specified, custom_keys must be a dict, '\n f'but got {type(self.paramwise_cfg[\"custom_keys\"])}')\n if self.base_wd is None:\n for key in self.paramwise_cfg['custom_keys']:\n if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:\n raise ValueError('base_wd should not be None')\n\n # get base lr and weight decay\n # weight_decay must be explicitly specified if mult is specified\n if ('bias_decay_mult' in self.paramwise_cfg\n or 'norm_decay_mult' in self.paramwise_cfg\n or 'dwconv_decay_mult' in self.paramwise_cfg):\n if self.base_wd is None:\n raise ValueError('base_wd should not be None')\n\n def _is_in(self, param_group: dict, param_group_list: list) -> bool:\n \"\"\"check whether the `param_group` is in the`param_group_list`\"\"\"\n assert is_list_of(param_group_list, dict)\n param = set(param_group['params'])\n param_set = set()\n for group in param_group_list:\n param_set.update(set(group['params']))\n\n return not param.isdisjoint(param_set)\n\n def add_params(self,\n params: List[dict],\n module: nn.Module,\n prefix: str = '',\n is_dcn_module: Optional[Union[int, float]] = None) -> None:\n \"\"\"Add all parameters of module to the params list.\n\n The parameters of the given module will be added to the list of param\n groups, with specific rules defined by paramwise_cfg.\n\n Args:\n params (list[dict]): A list of param groups, it will be modified\n in place.\n module (nn.Module): The module to be added.\n prefix (str): The prefix of the module\n is_dcn_module (int|float|None): If the current module is a\n submodule of DCN, `is_dcn_module` will be passed to\n control conv_offset layer's learning rate. 
Defaults to None.\n \"\"\"\n # get param-wise options\n custom_keys = self.paramwise_cfg.get('custom_keys', {})\n # first sort with alphabet order and then sort with reversed len of str\n sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)\n\n bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)\n bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)\n norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)\n dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)\n bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)\n dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.)\n\n # special rules for norm layers and depth-wise conv layers\n is_norm = isinstance(module,\n (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))\n is_dwconv = (\n isinstance(module, torch.nn.Conv2d)\n and module.in_channels == module.groups)\n\n for name, param in module.named_parameters(recurse=False):\n param_group = {'params': [param]}\n if not param.requires_grad:\n params.append(param_group)\n continue\n if bypass_duplicate and self._is_in(param_group, params):\n warnings.warn(f'{prefix} is duplicate. It is skipped since '\n f'bypass_duplicate={bypass_duplicate}')\n continue\n # if the parameter match one of the custom keys, ignore other rules\n is_custom = False\n for key in sorted_keys:\n if key in f'{prefix}.{name}':\n is_custom = True\n lr_mult = custom_keys[key].get('lr_mult', 1.)\n param_group['lr'] = self.base_lr * lr_mult\n if self.base_wd is not None:\n decay_mult = custom_keys[key].get('decay_mult', 1.)\n param_group['weight_decay'] = self.base_wd * decay_mult\n break\n\n if not is_custom:\n # bias_lr_mult affects all bias parameters\n # except for norm.bias dcn.conv_offset.bias\n if name == 'bias' and not (is_norm or is_dcn_module):\n param_group['lr'] = self.base_lr * bias_lr_mult\n\n if (prefix.find('conv_offset') != -1 and is_dcn_module\n and isinstance(module, torch.nn.Conv2d)):\n # deal with both dcn_offset's bias & weight\n param_group['lr'] = self.base_lr * dcn_offset_lr_mult\n\n # apply weight decay policies\n if self.base_wd is not None:\n # norm decay\n if is_norm:\n param_group[\n 'weight_decay'] = self.base_wd * norm_decay_mult\n # depth-wise conv\n elif is_dwconv:\n param_group[\n 'weight_decay'] = self.base_wd * dwconv_decay_mult\n # bias lr and decay\n elif name == 'bias' and not is_dcn_module:\n # TODO: current bias_decay_mult will have affect on DCN\n param_group[\n 'weight_decay'] = self.base_wd * bias_decay_mult\n params.append(param_group)\n\n if mmcv_full_available():\n from mmcv.ops import DeformConv2d, ModulatedDeformConv2d\n is_dcn_module = isinstance(module,\n (DeformConv2d, ModulatedDeformConv2d))\n else:\n is_dcn_module = False\n for child_name, child_mod in module.named_children():\n child_prefix = f'{prefix}.{child_name}' if prefix else child_name\n self.add_params(\n params,\n child_mod,\n prefix=child_prefix,\n is_dcn_module=is_dcn_module)\n\n def __call__(self, model: nn.Module) -> torch.optim.Optimizer:\n if hasattr(model, 'module'):\n model = model.module\n\n optimizer_cfg = self.optimizer_cfg.copy()\n # if no paramwise option is specified, just use the global setting\n if not self.paramwise_cfg:\n optimizer_cfg['params'] = model.parameters()\n return OPTIMIZERS.build(optimizer_cfg)\n\n # set param-wise lr and weight decay recursively\n params: List = []\n self.add_params(params, model)\n optimizer_cfg['params'] = params\n\n return OPTIMIZERS.build(optimizer_cfg)\n", "path": 
"mmengine/optim/optimizer/default_constructor.py"}]} | 4,023 | 255 |
gh_patches_debug_63334 | rasdani/github-patches | git_diff | sanic-org__sanic-1527 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Publish 19.3 release to PyPI
Thank you for the release 3 days ago!
https://github.com/huge-success/sanic/releases/tag/19.3
It's missing from PyPI at the moment:
https://pypi.org/project/sanic/#history
Please publish it at your convenience 🙇
Keep up the awesome work ❤️
</issue>
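An illustrative way to confirm the symptom described above (a GitHub tag with no matching upload) is to ask PyPI's JSON API which versions have actually been published. This check is not part of the repository, and the package/version strings are only examples:

```python
# Illustrative check only: list the versions PyPI knows about and see whether
# a 19.3.x release is among them. Requires network access.
import json
import urllib.request

def published_versions(package: str) -> list:
    url = f"https://pypi.org/pypi/{package}/json"
    with urllib.request.urlopen(url) as resp:
        return sorted(json.load(resp)["releases"])

versions = published_versions("sanic")
print("19.3 release published:", any(v.startswith("19.3") for v in versions))
```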
<code>
[start of sanic/__init__.py]
1 from sanic.app import Sanic
2 from sanic.blueprints import Blueprint
3
4
5 __version__ = "18.12.0"
6
7 __all__ = ["Sanic", "Blueprint"]
8
[end of sanic/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/__init__.py b/sanic/__init__.py
--- a/sanic/__init__.py
+++ b/sanic/__init__.py
@@ -2,6 +2,6 @@
from sanic.blueprints import Blueprint
-__version__ = "18.12.0"
+__version__ = "19.03.0"
__all__ = ["Sanic", "Blueprint"]
| {"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -2,6 +2,6 @@\n from sanic.blueprints import Blueprint\n \n \n-__version__ = \"18.12.0\"\n+__version__ = \"19.03.0\"\n \n __all__ = [\"Sanic\", \"Blueprint\"]\n", "issue": "Publish 19.3 release to PyPI\nThank you for the release 3 days ago!\r\n\r\nhttps://github.com/huge-success/sanic/releases/tag/19.3\r\n\r\nIt's missing from PyPI at the moment:\r\n\r\nhttps://pypi.org/project/sanic/#history\r\n\r\nPlease publish it at your convenience \ud83d\ude47 \r\n\r\nKeep up the awesome work \u2764\ufe0f \n", "before_files": [{"content": "from sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\n\n\n__version__ = \"18.12.0\"\n\n__all__ = [\"Sanic\", \"Blueprint\"]\n", "path": "sanic/__init__.py"}]} | 666 | 95 |
gh_patches_debug_42094 | rasdani/github-patches | git_diff | crytic__slither-494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`slither-flat` doesn't flatten dYdX solo contracts correctly
Hi There,
Thanks for building `slither-flat`! It seems to be a lot more forgiving than `soljitsu` or `truffle-flatten`. I'm struggling, however, with using it to flatten dYdX contracts.
**How to reproduce**
```bash
#!/usr/bin/bash
# Dependency: npm, nvm
# Better not to run this as a script
# Get the repository
echo "solo" > .gitignore
git clone https://github.com/dydxprotocol/solo.git
cd solo
# Install javascript dependencies
npm i keccakjs --save
npm i solium@^1.0.0 --save
npm i acorn --save
npm i
# Testing that everything installed correctly (make sure Docker is enabled at this point)
nvm use 10.16.2 # Need this version to compile contracts
npm test
# Install and run slither-flat
cd ..
pipenv install slither-analyzer
pipenv run slither-flat solo
```
**Issues**
I noticed three issues with these contracts:
1) Duplicate contracts
In particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file)
2) Missing characters
In particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`)
3) Some contracts don't exist in their own file anymore
Contract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. [perhaps this is intentional]
**Potential problems**
With `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here.
</issue>
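The `ontract ReentrancyGuard` symptom is characteristic of character-based string slicing being driven by byte-based offsets: solc source mappings count bytes, so a multi-byte UTF-8 character anywhere earlier in the file shifts every later slice. The duplicated `ReentrancyGuard` definitions are a separate effect — the flattener writes one self-contained file per derived contract, so shared base contracts reappear in several outputs. The snippet below is only a minimal illustration of the offset mismatch using an invented source line; it is not code from the repository.

```python
# Minimal sketch of a byte-offset vs. character-offset mismatch.
# "©" is 2 bytes in UTF-8 but a single character in a Python str,
# so a byte offset applied to the str lands one position too far.
source = "// © dYdX\ncontract ReentrancyGuard {}"

start = source.encode("utf8").find(b"contract")   # byte offset, solc-style

print(source[start:start + 8])                    # 'ontract ' (truncated)
print(source.encode("utf8")[start:start + 8])     # b'contract' (correct)
```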
<code>
[start of slither/tools/flattening/flattening.py]
1 from pathlib import Path
2 import re
3 import logging
4 from collections import namedtuple
5
6 from slither.core.declarations import SolidityFunction
7 from slither.exceptions import SlitherException
8 from slither.core.solidity_types.user_defined_type import UserDefinedType
9 from slither.core.declarations.structure import Structure
10 from slither.core.declarations.enum import Enum
11 from slither.core.declarations.contract import Contract
12 from slither.slithir.operations import NewContract, TypeConversion, SolidityCall
13
14 logger = logging.getLogger("Slither-flattening")
15
16 # index: where to start
17 # patch_type:
18 # - public_to_external: public to external (external-to-public)
19 # - calldata_to_memory: calldata to memory (external-to-public)
20 # - line_removal: remove the line (remove-assert)
21 Patch = namedtuple('PatchExternal', ['index', 'patch_type'])
22
23 class Flattening:
24 DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')
25
26 def __init__(self, slither, external_to_public=False, remove_assert=False, private_to_internal=False):
27 self._source_codes = {}
28 self._slither = slither
29 self._external_to_public = external_to_public
30 self._remove_assert = remove_assert
31 self._use_abi_encoder_v2 = False
32 self._private_to_internal = private_to_internal
33
34 self._check_abi_encoder_v2()
35
36 for contract in slither.contracts:
37 self._get_source_code(contract)
38
39 def _check_abi_encoder_v2(self):
40 for p in self._slither.pragma_directives:
41 if 'ABIEncoderV2' in str(p.directive):
42 self._use_abi_encoder_v2 = True
43 return
44
45 def _get_source_code(self, contract):
46 src_mapping = contract.source_mapping
47 content = self._slither.source_code[src_mapping['filename_absolute']]
48 start = src_mapping['start']
49 end = src_mapping['start'] + src_mapping['length']
50
51 to_patch = []
52 # interface must use external
53 if self._external_to_public and contract.contract_kind != "interface":
54 for f in contract.functions_declared:
55 # fallback must be external
56 if f.is_fallback or f.is_constructor_variables:
57 continue
58 if f.visibility == 'external':
59 attributes_start = (f.parameters_src.source_mapping['start'] +
60 f.parameters_src.source_mapping['length'])
61 attributes_end = f.returns_src.source_mapping['start']
62 attributes = content[attributes_start:attributes_end]
63 regex = re.search(r'((\sexternal)\s+)|(\sexternal)$|(\)external)$', attributes)
64 if regex:
65 to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'public_to_external'))
66 else:
67 raise SlitherException(f'External keyword not found {f.name} {attributes}')
68
69 for var in f.parameters:
70 if var.location == "calldata":
71 calldata_start = var.source_mapping['start']
72 calldata_end = calldata_start + var.source_mapping['length']
73 calldata_idx = content[calldata_start:calldata_end].find(' calldata ')
74 to_patch.append(Patch(calldata_start + calldata_idx + 1, 'calldata_to_memory'))
75
76 if self._private_to_internal:
77 for variable in contract.state_variables_declared:
78 if variable.visibility == 'private':
79 print(variable.source_mapping)
80 attributes_start = variable.source_mapping['start']
81 attributes_end = attributes_start + variable.source_mapping['length']
82 attributes = content[attributes_start:attributes_end]
83 print(attributes)
84 regex = re.search(r' private ', attributes)
85 if regex:
86 to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'private_to_internal'))
87 else:
88 raise SlitherException(f'private keyword not found {v.name} {attributes}')
89
90 if self._remove_assert:
91 for function in contract.functions_and_modifiers_declared:
92 for node in function.nodes:
93 for ir in node.irs:
94 if isinstance(ir, SolidityCall) and ir.function == SolidityFunction('assert(bool)'):
95 to_patch.append(Patch(node.source_mapping['start'], 'line_removal'))
96 logger.info(f'Code commented: {node.expression} ({node.source_mapping_str})')
97
98 to_patch.sort(key=lambda x: x.index, reverse=True)
99
100 content = content[start:end]
101 for patch in to_patch:
102 patch_type = patch.patch_type
103 index = patch.index
104 index = index - start
105 if patch_type == 'public_to_external':
106 content = content[:index] + 'public' + content[index + len('external'):]
107 if patch_type == 'private_to_internal':
108 content = content[:index] + 'internal' + content[index + len('private'):]
109 elif patch_type == 'calldata_to_memory':
110 content = content[:index] + 'memory' + content[index + len('calldata'):]
111 else:
112 assert patch_type == 'line_removal'
113 content = content[:index] + ' // ' + content[index:]
114
115 self._source_codes[contract] = content
116
117 def _export_from_type(self, t, contract, exported, list_contract):
118 if isinstance(t, UserDefinedType):
119 if isinstance(t.type, (Enum, Structure)):
120 if t.type.contract != contract and not t.type.contract in exported:
121 self._export_contract(t.type.contract, exported, list_contract)
122 else:
123 assert isinstance(t.type, Contract)
124 if t.type != contract and not t.type in exported:
125 self._export_contract(t.type, exported, list_contract)
126
127 def _export_contract(self, contract, exported, list_contract):
128 if contract.name in exported:
129 return
130 exported.add(contract.name)
131 for inherited in contract.inheritance:
132 self._export_contract(inherited, exported, list_contract)
133
134 # Find all the external contracts called
135 externals = contract.all_library_calls + contract.all_high_level_calls
136 # externals is a list of (contract, function)
137 # We also filter call to itself to avoid infilite loop
138 externals = list(set([e[0] for e in externals if e[0] != contract]))
139
140 for inherited in externals:
141 self._export_contract(inherited, exported, list_contract)
142
143 # Find all the external contracts use as a base type
144 local_vars = []
145 for f in contract.functions_declared:
146 local_vars += f.variables
147
148 for v in contract.variables + local_vars:
149 self._export_from_type(v.type, contract, exported, list_contract)
150
151 # Find all convert and "new" operation that can lead to use an external contract
152 for f in contract.functions_declared:
153 for ir in f.slithir_operations:
154 if isinstance(ir, NewContract):
155 if ir.contract_created != contract and not ir.contract_created in exported:
156 self._export_contract(ir.contract_created, exported, list_contract)
157 if isinstance(ir, TypeConversion):
158 self._export_from_type(ir.type, contract, exported, list_contract)
159 list_contract.append(self._source_codes[contract])
160
161 def _export(self, contract, ret):
162 self._export_contract(contract, set(), ret)
163 path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')
164 logger.info(f'Export {path}')
165 with open(path, 'w') as f:
166 if self._slither.solc_version:
167 f.write(f'pragma solidity {self._slither.solc_version};\n')
168 if self._use_abi_encoder_v2:
169 f.write('pragma experimental ABIEncoderV2;\n')
170 f.write('\n'.join(ret))
171 f.write('\n')
172
173 def export(self, target=None):
174
175 if not self.DEFAULT_EXPORT_PATH.exists():
176 self.DEFAULT_EXPORT_PATH.mkdir(parents=True)
177
178 if target is None:
179 for contract in self._slither.contracts_derived:
180 ret = []
181 self._export(contract, ret)
182 else:
183 contract = self._slither.get_contract_from_name(target)
184 if contract is None:
185 logger.error(f'{target} not found')
186 else:
187 ret = []
188 self._export(contract, ret)
189
[end of slither/tools/flattening/flattening.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py
--- a/slither/tools/flattening/flattening.py
+++ b/slither/tools/flattening/flattening.py
@@ -4,6 +4,7 @@
from collections import namedtuple
from slither.core.declarations import SolidityFunction
+from slither.core.solidity_types import MappingType, ArrayType
from slither.exceptions import SlitherException
from slither.core.solidity_types.user_defined_type import UserDefinedType
from slither.core.declarations.structure import Structure
@@ -20,6 +21,7 @@
# - line_removal: remove the line (remove-assert)
Patch = namedtuple('PatchExternal', ['index', 'patch_type'])
+
class Flattening:
DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')
@@ -44,7 +46,7 @@
def _get_source_code(self, contract):
src_mapping = contract.source_mapping
- content = self._slither.source_code[src_mapping['filename_absolute']]
+ content = self._slither.source_code[src_mapping['filename_absolute']].encode('utf8')
start = src_mapping['start']
end = src_mapping['start'] + src_mapping['length']
@@ -112,17 +114,22 @@
assert patch_type == 'line_removal'
content = content[:index] + ' // ' + content[index:]
- self._source_codes[contract] = content
+ self._source_codes[contract] = content.decode('utf8')
def _export_from_type(self, t, contract, exported, list_contract):
if isinstance(t, UserDefinedType):
if isinstance(t.type, (Enum, Structure)):
- if t.type.contract != contract and not t.type.contract in exported:
+ if t.type.contract != contract and t.type.contract not in exported:
self._export_contract(t.type.contract, exported, list_contract)
else:
assert isinstance(t.type, Contract)
- if t.type != contract and not t.type in exported:
+ if t.type != contract and t.type not in exported:
self._export_contract(t.type, exported, list_contract)
+ elif isinstance(t, MappingType):
+ self._export_from_type(t.type_from, contract, exported, list_contract)
+ self._export_from_type(t.type_to, contract, exported, list_contract)
+ elif isinstance(t, ArrayType):
+ self._export_from_type(t.type, contract, exported, list_contract)
def _export_contract(self, contract, exported, list_contract):
if contract.name in exported:
@@ -148,6 +155,10 @@
for v in contract.variables + local_vars:
self._export_from_type(v.type, contract, exported, list_contract)
+ for s in contract.structures:
+ for elem in s.elems.values():
+ self._export_from_type(elem.type, contract, exported, list_contract)
+
# Find all convert and "new" operation that can lead to use an external contract
for f in contract.functions_declared:
for ir in f.slithir_operations:
| {"golden_diff": "diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py\n--- a/slither/tools/flattening/flattening.py\n+++ b/slither/tools/flattening/flattening.py\n@@ -4,6 +4,7 @@\n from collections import namedtuple\n \n from slither.core.declarations import SolidityFunction\n+from slither.core.solidity_types import MappingType, ArrayType\n from slither.exceptions import SlitherException\n from slither.core.solidity_types.user_defined_type import UserDefinedType\n from slither.core.declarations.structure import Structure\n@@ -20,6 +21,7 @@\n # - line_removal: remove the line (remove-assert)\n Patch = namedtuple('PatchExternal', ['index', 'patch_type'])\n \n+\n class Flattening:\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n \n@@ -44,7 +46,7 @@\n \n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n- content = self._slither.source_code[src_mapping['filename_absolute']]\n+ content = self._slither.source_code[src_mapping['filename_absolute']].encode('utf8')\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n \n@@ -112,17 +114,22 @@\n assert patch_type == 'line_removal'\n content = content[:index] + ' // ' + content[index:]\n \n- self._source_codes[contract] = content\n+ self._source_codes[contract] = content.decode('utf8')\n \n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n- if t.type.contract != contract and not t.type.contract in exported:\n+ if t.type.contract != contract and t.type.contract not in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n- if t.type != contract and not t.type in exported:\n+ if t.type != contract and t.type not in exported:\n self._export_contract(t.type, exported, list_contract)\n+ elif isinstance(t, MappingType):\n+ self._export_from_type(t.type_from, contract, exported, list_contract)\n+ self._export_from_type(t.type_to, contract, exported, list_contract)\n+ elif isinstance(t, ArrayType):\n+ self._export_from_type(t.type, contract, exported, list_contract)\n \n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n@@ -148,6 +155,10 @@\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n \n+ for s in contract.structures:\n+ for elem in s.elems.values():\n+ self._export_from_type(elem.type, contract, exported, list_contract)\n+\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n", "issue": "`slither-flat` doesn't flatten dYdX solo contracts correctly\nHi There,\r\n\r\nThanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. 
I'm struggling, however, with using it to flatten dYdX contracts.\r\n\r\n**How to reproduce**\r\n\r\n```bash\r\n#!/usr/bin/bash\r\n\r\n# Dependency: npm, nvm\r\n# Better not to run this as a script\r\n\r\n# Get the repository\r\necho \"solo\" > .gitignore\r\ngit clone https://github.com/dydxprotocol/solo.git\r\ncd solo\r\n\r\n# Install javascript dependencies\r\nnpm i keccakjs --save\r\nnpm i solium@^1.0.0 --save\r\nnpm i acorn --save\r\nnpm i\r\n\r\n# Testing that everything installed correctly (make sure Docker is enabled at this point)\r\nnvm use 10.16.2 # Need this version to compile contracts\r\nnpm test\r\n\r\n# Install and run slither-flat\r\ncd ..\r\npipenv install slither-analyzer\r\npipenv run slither-flat solo\r\n```\r\n\r\n**Issues**\r\n\r\nI noticed two issues with these contracts:\r\n\r\n1) Duplicate contracts\r\n\r\nIn particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file)\r\n\r\n2) Missing characters\r\n\r\nIn particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`)\r\n\r\n3) Some contracts don't exist in their own file anymore\r\n\r\nContract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. [perhaps this is intentional]\r\n\r\n**Potential problems**\r\n\r\nWith `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here.\n`slither-flat` doesn't flatten dYdX solo contracts correctly\nHi There,\r\n\r\nThanks for building `slither-flat`! It seems to be a lot more forgivign than `soljitsu` or `truffle-flatten`. I'm struggling, however, with using it to flatten dYdX contracts.\r\n\r\n**How to reproduce**\r\n\r\n```bash\r\n#!/usr/bin/bash\r\n\r\n# Dependency: npm, nvm\r\n# Better not to run this as a script\r\n\r\n# Get the repository\r\necho \"solo\" > .gitignore\r\ngit clone https://github.com/dydxprotocol/solo.git\r\ncd solo\r\n\r\n# Install javascript dependencies\r\nnpm i keccakjs --save\r\nnpm i solium@^1.0.0 --save\r\nnpm i acorn --save\r\nnpm i\r\n\r\n# Testing that everything installed correctly (make sure Docker is enabled at this point)\r\nnvm use 10.16.2 # Need this version to compile contracts\r\nnpm test\r\n\r\n# Install and run slither-flat\r\ncd ..\r\npipenv install slither-analyzer\r\npipenv run slither-flat solo\r\n```\r\n\r\n**Issues**\r\n\r\nI noticed two issues with these contracts:\r\n\r\n1) Duplicate contracts\r\n\r\nIn particular, `ReentrancyGuard` appeared many times in different contract files (and multiple times per file)\r\n\r\n2) Missing characters\r\n\r\nIn particular, `ReentrancyGuard` appeared as `ontract ReentrancyGuard` (missing a `c`)\r\n\r\n3) Some contracts don't exist in their own file anymore\r\n\r\nContract `external/OnlySolo.sol` doesn't have a corresponding `OnlySolo.sol` file in the output, instead it's embedded inline with other contracts in multiple places. 
[perhaps this is intentional]\r\n\r\n**Potential problems**\r\n\r\nWith `truffle-flatten` it flagged a circular dependency in the contracts, so perhaps this is related to the issue here.\n", "before_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom collections import namedtuple\n\nfrom slither.core.declarations import SolidityFunction\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion, SolidityCall\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\n# index: where to start\n# patch_type:\n# - public_to_external: public to external (external-to-public)\n# - calldata_to_memory: calldata to memory (external-to-public)\n# - line_removal: remove the line (remove-assert)\nPatch = namedtuple('PatchExternal', ['index', 'patch_type'])\n\nclass Flattening:\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False, remove_assert=False, private_to_internal=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._remove_assert = remove_assert\n self._use_abi_encoder_v2 = False\n self._private_to_internal = private_to_internal\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']]\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n to_patch = []\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'public_to_external'))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append(Patch(calldata_start + calldata_idx + 1, 'calldata_to_memory'))\n\n if self._private_to_internal:\n for variable in contract.state_variables_declared:\n if variable.visibility == 'private':\n print(variable.source_mapping)\n attributes_start = variable.source_mapping['start']\n attributes_end = attributes_start + variable.source_mapping['length']\n attributes = content[attributes_start:attributes_end]\n print(attributes)\n regex = re.search(r' private ', attributes)\n if regex:\n 
to_patch.append(Patch(attributes_start + regex.span()[0] + 1, 'private_to_internal'))\n else:\n raise SlitherException(f'private keyword not found {v.name} {attributes}')\n\n if self._remove_assert:\n for function in contract.functions_and_modifiers_declared:\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(ir, SolidityCall) and ir.function == SolidityFunction('assert(bool)'):\n to_patch.append(Patch(node.source_mapping['start'], 'line_removal'))\n logger.info(f'Code commented: {node.expression} ({node.source_mapping_str})')\n\n to_patch.sort(key=lambda x: x.index, reverse=True)\n\n content = content[start:end]\n for patch in to_patch:\n patch_type = patch.patch_type\n index = patch.index\n index = index - start\n if patch_type == 'public_to_external':\n content = content[:index] + 'public' + content[index + len('external'):]\n if patch_type == 'private_to_internal':\n content = content[:index] + 'internal' + content[index + len('private'):]\n elif patch_type == 'calldata_to_memory':\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n assert patch_type == 'line_removal'\n content = content[:index] + ' // ' + content[index:]\n\n self._source_codes[contract] = content\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and not t.type.contract in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and not t.type in exported:\n self._export_contract(t.type, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n if self._slither.solc_version:\n f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not 
self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n", "path": "slither/tools/flattening/flattening.py"}]} | 3,565 | 701 |
gh_patches_debug_2102 | rasdani/github-patches | git_diff | rucio__rucio-1372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix activity in BB8
Motivation
----------
BB8 uses activity `Data Rebalancing` but the activity defined in ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS
</issue>
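A minimal sketch of one way to keep the spelling consistent is to define the canonical activity string once (or normalise incoming values against it) instead of repeating slightly different literals. This is illustrative only, not the patch applied in this repository, and the canonical list shown is an assumption:

```python
# Illustrative only: normalise an activity name against its canonical spelling
# so "Data Rebalancing" and "Data rebalancing" cannot drift apart.
CANONICAL_ACTIVITIES = ("Data rebalancing",)  # spelling assumed from the ATLAS schema

def normalise_activity(activity: str) -> str:
    for canonical in CANONICAL_ACTIVITIES:
        if activity.lower() == canonical.lower():
            return canonical
    return activity

print(normalise_activity("Data Rebalancing"))  # -> "Data rebalancing"
```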
<code>
[start of lib/rucio/vcsversion.py]
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.17.4',
8 'branch_nick': 'patch-0-1_17_4_client_release_prep',
9 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',
10 'revno': 6827
11 }
12
[end of lib/rucio/vcsversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.17.4',
- 'branch_nick': 'patch-0-1_17_4_client_release_prep',
- 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',
- 'revno': 6827
+ 'version': '1.17.5',
+ 'branch_nick': 'patch-0-1_17_5_preparation',
+ 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',
+ 'revno': 6837
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.17.4',\n- 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n- 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n- 'revno': 6827\n+ 'version': '1.17.5',\n+ 'branch_nick': 'patch-0-1_17_5_preparation',\n+ 'revision_id': '537e1e47eb627741394b6bb9bc21d0f046296275',\n+ 'revno': 6837\n }\n", "issue": "Fix activity in BB8\nMotivation\r\n----------\r\n\r\nBB8 uses activity `Data Rebalancing` but the activity defined in ATLAS schema is `Data rebalancing`. We should use the same activity everywhere, and it should be consistent with the share defined in FTS\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.17.4',\n 'branch_nick': 'patch-0-1_17_4_client_release_prep',\n 'revision_id': 'ba996ce9bf8366cd7d8d1fb60a7f1daf8d4f517e',\n 'revno': 6827\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 722 | 243 |
gh_patches_debug_19673 | rasdani/github-patches | git_diff | pypa__setuptools-3023 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AssertionError no _distutils when running get-pip with Setuptools and USE_DISTUTILS=local
Somehow related: since `setuptools` v60, running `get-pip.py` over an existing `pip` + `setuptools` fails (at least on Debian) when the Debian `python3-distutils` package is installed, e.g. as a dependency of the Python development headers package:
```
/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:24: UserWarning: Distutils was imported before Setuptools, but importing Setuptools also replaces the `distutils` module in `sys.modules`. This may lead to undesirable behaviors or errors. To avoid these issues, avoid using distutils directly, ensure that setuptools is installed in the traditional way (e.g. not an editable install), and/or make sure that setuptools is always imported before distutils.
warnings.warn(
/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:36: UserWarning: Setuptools is replacing distutils.
warnings.warn("Setuptools is replacing distutils.")
Traceback (most recent call last):
File "/tmp/DietPi-Software/./get-pip.py", line 27081, in <module>
main()
File "/tmp/DietPi-Software/./get-pip.py", line 139, in main
bootstrap(tmpdir=tmpdir)
File "/tmp/DietPi-Software/./get-pip.py", line 120, in bootstrap
args = determine_pip_install_arguments()
File "/tmp/DietPi-Software/./get-pip.py", line 65, in determine_pip_install_arguments
import setuptools # noqa
File "/usr/local/lib/python3.9/dist-packages/setuptools/__init__.py", line 8, in <module>
import _distutils_hack.override # noqa: F401
File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/override.py", line 1, in <module>
__import__('_distutils_hack').do_override()
File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py", line 73, in do_override
ensure_local_distutils()
File "/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py", line 61, in ensure_local_distutils
assert '_distutils' in core.__file__, core.__file__
AssertionError: /usr/lib/python3.9/distutils/core.py
```
This `_distutils_hack` comes from `get-pip.py`. Looks like removing any use and detangling `distutils` from `pip` in favour of `setuptools` would solve both issues.
_Originally posted by @MichaIng in https://github.com/pypa/pip/issues/10742#issuecomment-1003741034_
</issue>
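For context, the failing assertion comes from `ensure_local_distutils()` in the `_distutils_hack` module shown below: after the meta path shim is installed, `distutils.core` is expected to resolve to setuptools' vendored copy, but here the Debian stdlib copy (`/usr/lib/python3.9/distutils/core.py`) wins. The following is a hypothetical diagnostic, not part of the repository, for checking which copy is active — on an affected machine the `import setuptools` line already raises the AssertionError above, while on a healthy one it prints the vendored path:

```python
# Hypothetical diagnostic: report which distutils implementation wins.
# Importing setuptools first installs the DistutilsMetaFinder shim (when
# SETUPTOOLS_USE_DISTUTILS=local, the default), so distutils.core should
# then resolve to setuptools/_distutils/core.py rather than the stdlib copy.
import setuptools  # noqa: F401  (triggers _distutils_hack.do_override)
import distutils.core as core

print(core.__file__)
print("vendored" if "_distutils" in core.__file__ else "stdlib")
```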
<code>
[start of _distutils_hack/__init__.py]
1 # don't import any costly modules
2 import sys
3 import os
4
5
6 is_pypy = '__pypy__' in sys.builtin_module_names
7
8
9 def warn_distutils_present():
10 if 'distutils' not in sys.modules:
11 return
12 if is_pypy and sys.version_info < (3, 7):
13 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
14 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
15 return
16 import warnings
17 warnings.warn(
18 "Distutils was imported before Setuptools, but importing Setuptools "
19 "also replaces the `distutils` module in `sys.modules`. This may lead "
20 "to undesirable behaviors or errors. To avoid these issues, avoid "
21 "using distutils directly, ensure that setuptools is installed in the "
22 "traditional way (e.g. not an editable install), and/or make sure "
23 "that setuptools is always imported before distutils.")
24
25
26 def clear_distutils():
27 if 'distutils' not in sys.modules:
28 return
29 import warnings
30 warnings.warn("Setuptools is replacing distutils.")
31 mods = [
32 name for name in sys.modules
33 if name == "distutils" or name.startswith("distutils.")
34 ]
35 for name in mods:
36 del sys.modules[name]
37
38
39 def enabled():
40 """
41 Allow selection of distutils by environment variable.
42 """
43 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
44 return which == 'local'
45
46
47 def ensure_local_distutils():
48 import importlib
49 clear_distutils()
50
51 # With the DistutilsMetaFinder in place,
52 # perform an import to cause distutils to be
53 # loaded from setuptools._distutils. Ref #2906.
54 with shim():
55 importlib.import_module('distutils')
56
57 # check that submodules load as expected
58 core = importlib.import_module('distutils.core')
59 assert '_distutils' in core.__file__, core.__file__
60
61
62 def do_override():
63 """
64 Ensure that the local copy of distutils is preferred over stdlib.
65
66 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
67 for more motivation.
68 """
69 if enabled():
70 warn_distutils_present()
71 ensure_local_distutils()
72
73
74 class _TrivialRe:
75 def __init__(self, *patterns):
76 self._patterns = patterns
77
78 def match(self, string):
79 return all(pat in string for pat in self._patterns)
80
81
82 class DistutilsMetaFinder:
83 def find_spec(self, fullname, path, target=None):
84 if path is not None:
85 return
86
87 method_name = 'spec_for_{fullname}'.format(**locals())
88 method = getattr(self, method_name, lambda: None)
89 return method()
90
91 def spec_for_distutils(self):
92 import importlib
93 import importlib.abc
94 import importlib.util
95 import warnings
96
97 # warnings.filterwarnings() imports the re module
98 warnings._add_filter(
99 'ignore',
100 _TrivialRe("distutils", "deprecated"),
101 DeprecationWarning,
102 None,
103 0,
104 append=True
105 )
106
107 try:
108 mod = importlib.import_module('setuptools._distutils')
109 except Exception:
110 # There are a couple of cases where setuptools._distutils
111 # may not be present:
112 # - An older Setuptools without a local distutils is
113 # taking precedence. Ref #2957.
114 # - Path manipulation during sitecustomize removes
115 # setuptools from the path but only after the hook
116 # has been loaded. Ref #2980.
117 # In either case, fall back to stdlib behavior.
118 return
119
120 class DistutilsLoader(importlib.abc.Loader):
121
122 def create_module(self, spec):
123 return mod
124
125 def exec_module(self, module):
126 pass
127
128 return importlib.util.spec_from_loader(
129 'distutils', DistutilsLoader(), origin=mod.__file__
130 )
131
132 def spec_for_pip(self):
133 """
134 Ensure stdlib distutils when running under pip.
135 See pypa/pip#8761 for rationale.
136 """
137 if self.pip_imported_during_build():
138 return
139 if self.is_get_pip():
140 return
141 clear_distutils()
142 self.spec_for_distutils = lambda: None
143
144 @classmethod
145 def pip_imported_during_build(cls):
146 """
147 Detect if pip is being imported in a build script. Ref #2355.
148 """
149 import traceback
150 return any(
151 cls.frame_file_is_setup(frame)
152 for frame, line in traceback.walk_stack(None)
153 )
154
155 @classmethod
156 def is_get_pip(cls):
157 """
158 Detect if get-pip is being invoked. Ref #2993.
159 """
160 try:
161 import __main__
162 return os.path.basename(__main__.__file__) == 'get-pip.py'
163 except AttributeError:
164 pass
165
166 @staticmethod
167 def frame_file_is_setup(frame):
168 """
169 Return True if the indicated frame suggests a setup.py file.
170 """
171 # some frames may not have __file__ (#2940)
172 return frame.f_globals.get('__file__', '').endswith('setup.py')
173
174
175 DISTUTILS_FINDER = DistutilsMetaFinder()
176
177
178 def add_shim():
179 DISTUTILS_FINDER in sys.meta_path or insert_shim()
180
181
182 class shim:
183 def __enter__(self):
184 insert_shim()
185
186 def __exit__(self, exc, value, tb):
187 remove_shim()
188
189
190 def insert_shim():
191 sys.meta_path.insert(0, DISTUTILS_FINDER)
192
193
194 def remove_shim():
195 try:
196 sys.meta_path.remove(DISTUTILS_FINDER)
197 except ValueError:
198 pass
199
[end of _distutils_hack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -136,11 +136,36 @@
"""
if self.pip_imported_during_build():
return
- if self.is_get_pip():
- return
clear_distutils()
self.spec_for_distutils = lambda: None
+ def spec_for_setuptools(self):
+ """
+ get-pip imports setuptools solely for the purpose of
+ determining if it's installed. In this case, provide
+ a stubbed spec to represent setuptools being present
+ without invoking any behavior.
+
+ Workaround for pypa/get-pip#137.
+ """
+ if not self.is_get_pip():
+ return
+
+ import importlib
+
+ class StubbedLoader(importlib.abc.Loader):
+
+ def create_module(self, spec):
+ import types
+ return types.ModuleType('setuptools')
+
+ def exec_module(self, module):
+ pass
+
+ return importlib.util.spec_from_loader(
+ 'setuptools', StubbedLoader(),
+ )
+
@classmethod
def pip_imported_during_build(cls):
"""
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -136,11 +136,36 @@\n \"\"\"\n if self.pip_imported_during_build():\n return\n- if self.is_get_pip():\n- return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n \n+ def spec_for_setuptools(self):\n+ \"\"\"\n+ get-pip imports setuptools solely for the purpose of\n+ determining if it's installed. In this case, provide\n+ a stubbed spec to represent setuptools being present\n+ without invoking any behavior.\n+\n+ Workaround for pypa/get-pip#137.\n+ \"\"\"\n+ if not self.is_get_pip():\n+ return\n+\n+ import importlib\n+\n+ class StubbedLoader(importlib.abc.Loader):\n+\n+ def create_module(self, spec):\n+ import types\n+ return types.ModuleType('setuptools')\n+\n+ def exec_module(self, module):\n+ pass\n+\n+ return importlib.util.spec_from_loader(\n+ 'setuptools', StubbedLoader(),\n+ )\n+\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n", "issue": "AssertionError no _distutils when running get-pip with Setuptools and USE_DISTUTILS=local\nSomehow related: Since `setuptools` v60 at least on Debian running `get-pip.py` over an existing `pip` + `setuptools` fails, when the Debian `python3-distutils` package is installed, e.g. as dependency of the Python development headers package:\r\n```\r\n/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:24: UserWarning: Distutils was imported before Setuptools, but importing Setuptools also replaces the `distutils` module in `sys.modules`. This may lead to undesirable behaviors or errors. To avoid these issues, avoid using distutils directly, ensure that setuptools is installed in the traditional way (e.g. not an editable install), and/or make sure that setuptools is always imported before distutils.\r\n warnings.warn(\r\n/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py:36: UserWarning: Setuptools is replacing distutils.\r\n warnings.warn(\"Setuptools is replacing distutils.\")\r\nTraceback (most recent call last):\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 27081, in <module>\r\n main()\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 139, in main\r\n bootstrap(tmpdir=tmpdir)\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 120, in bootstrap\r\n args = determine_pip_install_arguments()\r\n File \"/tmp/DietPi-Software/./get-pip.py\", line 65, in determine_pip_install_arguments\r\n import setuptools # noqa\r\n File \"/usr/local/lib/python3.9/dist-packages/setuptools/__init__.py\", line 8, in <module>\r\n import _distutils_hack.override # noqa: F401\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/override.py\", line 1, in <module>\r\n __import__('_distutils_hack').do_override()\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py\", line 73, in do_override\r\n ensure_local_distutils()\r\n File \"/usr/local/lib/python3.9/dist-packages/_distutils_hack/__init__.py\", line 61, in ensure_local_distutils\r\n assert '_distutils' in core.__file__, core.__file__\r\nAssertionError: /usr/lib/python3.9/distutils/core.py\r\n```\r\nThis `_distutils_hack` comes from `get-pip.py`. 
Looks like removing any use and detangling `distutils` from `pip` in favour of `setuptools` would solve both issues.\r\n\r\n_Originally posted by @MichaIng in https://github.com/pypa/pip/issues/10742#issuecomment-1003741034_\n", "before_files": [{"content": "# don't import any costly modules\nimport sys\nimport os\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n import warnings\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n import warnings\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [\n name for name in sys.modules\n if name == \"distutils\" or name.startswith(\"distutils.\")\n ]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n import importlib\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass _TrivialRe:\n def __init__(self, *patterns):\n self._patterns = patterns\n\n def match(self, string):\n return all(pat in string for pat in self._patterns)\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib\n import importlib.abc\n import importlib.util\n import warnings\n\n # warnings.filterwarnings() imports the re module\n warnings._add_filter(\n 'ignore',\n _TrivialRe(\"distutils\", \"deprecated\"),\n DeprecationWarning,\n None,\n 0,\n append=True\n )\n\n try:\n mod = importlib.import_module('setuptools._distutils')\n except Exception:\n # There are a couple of cases where setuptools._distutils\n # may not be present:\n # - An older Setuptools without a local distutils is\n # taking precedence. Ref #2957.\n # - Path manipulation during sitecustomize removes\n # setuptools from the path but only after the hook\n # has been loaded. 
Ref #2980.\n # In either case, fall back to stdlib behavior.\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return mod\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader(\n 'distutils', DistutilsLoader(), origin=mod.__file__\n )\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n if self.is_get_pip():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @classmethod\n def is_get_pip(cls):\n \"\"\"\n Detect if get-pip is being invoked. Ref #2993.\n \"\"\"\n try:\n import __main__\n return os.path.basename(__main__.__file__) == 'get-pip.py'\n except AttributeError:\n pass\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n DISTUTILS_FINDER in sys.meta_path or insert_shim()\n\n\nclass shim:\n def __enter__(self):\n insert_shim()\n\n def __exit__(self, exc, value, tb):\n remove_shim()\n\n\ndef insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 3,022 | 301 |
gh_patches_debug_17555 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of extensions/base_extension.py]
1 import hashlib
2 import os
3 from abc import ABC, abstractmethod
4 from typing import Union
5
6 __all__ = ["_Extension"]
7
8
9 class _Extension(ABC):
10 def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):
11 self._name = name
12 self._support_aot = support_aot
13 self._support_jit = support_jit
14 self.priority = priority
15
16 @property
17 def name(self):
18 return self._name
19
20 @property
21 def support_aot(self):
22 return self._support_aot
23
24 @property
25 def support_jit(self):
26 return self._support_jit
27
28 @staticmethod
29 def get_jit_extension_folder_path():
30 """
31 Kernels which are compiled during runtime will be stored in the same cache folder for reuse.
32 The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.
33 The name of the <cache-folder> follows a common format:
34 torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>
35
36 The <hash> suffix is the hash value of the path of the `colossalai` file.
37 """
38 import torch
39
40 import colossalai
41 from colossalai.accelerator import get_accelerator
42
43 # get torch version
44 torch_version_major = torch.__version__.split(".")[0]
45 torch_version_minor = torch.__version__.split(".")[1]
46
47 # get device version
48 device_name = get_accelerator().name
49 device_version = get_accelerator().get_version()
50
51 # use colossalai's file path as hash
52 hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()
53
54 # concat
55 home_directory = os.path.expanduser("~")
56 extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}"
57 cache_directory = os.path.join(home_directory, extension_directory)
58 return cache_directory
59
60 @abstractmethod
61 def is_hardware_available(self) -> bool:
62 """
63 Check if the hardware required by the kernel is available.
64 """
65
66 @abstractmethod
67 def assert_hardware_compatible(self) -> bool:
68 """
69 Check if the hardware required by the kernel is compatible.
70 """
71
72 @abstractmethod
73 def build_aot(self) -> Union["CppExtension", "CUDAExtension"]:
74 pass
75
76 @abstractmethod
77 def build_jit(self) -> None:
78 pass
79
80 @abstractmethod
81 def load(self):
82 pass
83
[end of extensions/base_extension.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/base_extension.py b/extensions/base_extension.py
--- a/extensions/base_extension.py
+++ b/extensions/base_extension.py
@@ -1,7 +1,7 @@
import hashlib
import os
from abc import ABC, abstractmethod
-from typing import Union
+from typing import Callable, Union
__all__ = ["_Extension"]
@@ -64,7 +64,7 @@
"""
@abstractmethod
- def assert_hardware_compatible(self) -> bool:
+ def assert_hardware_compatible(self) -> None:
"""
Check if the hardware required by the kernel is compatible.
"""
@@ -74,9 +74,9 @@
pass
@abstractmethod
- def build_jit(self) -> None:
+ def build_jit(self) -> Callable:
pass
@abstractmethod
- def load(self):
+ def load(self) -> Callable:
pass
| {"golden_diff": "diff --git a/extensions/base_extension.py b/extensions/base_extension.py\n--- a/extensions/base_extension.py\n+++ b/extensions/base_extension.py\n@@ -1,7 +1,7 @@\n import hashlib\n import os\n from abc import ABC, abstractmethod\n-from typing import Union\n+from typing import Callable, Union\n \n __all__ = [\"_Extension\"]\n \n@@ -64,7 +64,7 @@\n \"\"\"\n \n @abstractmethod\n- def assert_hardware_compatible(self) -> bool:\n+ def assert_hardware_compatible(self) -> None:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n@@ -74,9 +74,9 @@\n pass\n \n @abstractmethod\n- def build_jit(self) -> None:\n+ def build_jit(self) -> Callable:\n pass\n \n @abstractmethod\n- def load(self):\n+ def load(self) -> Callable:\n pass\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import hashlib\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Union\n\n__all__ = [\"_Extension\"]\n\n\nclass _Extension(ABC):\n def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):\n self._name = name\n self._support_aot = support_aot\n self._support_jit = support_jit\n self.priority = priority\n\n @property\n def name(self):\n return self._name\n\n @property\n def support_aot(self):\n return self._support_aot\n\n @property\n def support_jit(self):\n return self._support_jit\n\n @staticmethod\n def get_jit_extension_folder_path():\n \"\"\"\n Kernels which are compiled during runtime will be stored in the same cache folder for reuse.\n The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.\n The name of the <cache-folder> follows a common format:\n torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>\n\n The <hash> suffix is the hash value of the path of the `colossalai` file.\n \"\"\"\n import torch\n\n import colossalai\n from colossalai.accelerator import get_accelerator\n\n # get torch version\n torch_version_major = torch.__version__.split(\".\")[0]\n torch_version_minor = torch.__version__.split(\".\")[1]\n\n # get device version\n device_name = get_accelerator().name\n device_version = get_accelerator().get_version()\n\n # use colossalai's file path as hash\n hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()\n\n # concat\n home_directory = os.path.expanduser(\"~\")\n extension_directory = f\".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}\"\n cache_directory = os.path.join(home_directory, extension_directory)\n return cache_directory\n\n @abstractmethod\n def is_hardware_available(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is available.\n \"\"\"\n\n @abstractmethod\n def assert_hardware_compatible(self) -> bool:\n \"\"\"\n Check if the hardware required by the kernel is compatible.\n \"\"\"\n\n @abstractmethod\n def build_aot(self) -> Union[\"CppExtension\", \"CUDAExtension\"]:\n pass\n\n @abstractmethod\n def build_jit(self) -> None:\n pass\n\n @abstractmethod\n def load(self):\n pass\n", "path": "extensions/base_extension.py"}]} | 1,290 | 201 |
gh_patches_debug_407 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature: allow magic numbers in async functions constructors
We check that some magic numbers can be used in function constructors like so:
```python
def some_function(price, delta=0.1):
return price * delta
```
But, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21
What we need to do is:
1. Add `ast.AsyncFunctionDef` to the allowed list
2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py
</issue>
<code>
[start of wemake_python_styleguide/visitors/ast/numbers.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import Optional
5
6 from wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST
7 from wemake_python_styleguide.violations.best_practices import (
8 MagicNumberViolation,
9 )
10 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
11
12
13 class MagicNumberVisitor(BaseNodeVisitor):
14 """Checks magic numbers used in the code."""
15
16 _ALLOWED_PARENTS = (
17 ast.Assign,
18
19 # Constructor usages:
20 ast.FunctionDef,
21 ast.arguments,
22
23 # Primitives:
24 ast.List,
25 ast.Dict,
26 ast.Set,
27 ast.Tuple,
28 )
29
30 # TODO: make consistent naming rules for class attributes:
31 _PROXY_PARENTS = (
32 ast.UnaryOp,
33 )
34
35 def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:
36 """
37 Returns real number's parent.
38
39 What can go wrong?
40
41 1. Number can be negative: ``x = -1``,
42 so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``
43
44 """
45 parent = getattr(node, 'parent', None)
46 if isinstance(parent, self._PROXY_PARENTS):
47 return self._get_real_parent(parent)
48 return parent
49
50 def _check_is_magic(self, node: ast.Num) -> None:
51 parent = self._get_real_parent(node)
52 if isinstance(parent, self._ALLOWED_PARENTS):
53 return
54
55 if node.n in MAGIC_NUMBERS_WHITELIST:
56 return
57
58 if isinstance(node.n, int) and node.n <= 10:
59 return
60
61 self.add_violation(MagicNumberViolation(node, text=str(node.n)))
62
63 def visit_Num(self, node: ast.Num) -> None:
64 """
65 Checks numbers not to be magic constants inside the code.
66
67 Raises:
68 MagicNumberViolation
69
70 """
71 self._check_is_magic(node)
72 self.generic_visit(node)
73
[end of wemake_python_styleguide/visitors/ast/numbers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -18,6 +18,7 @@
# Constructor usages:
ast.FunctionDef,
+ ast.AsyncFunctionDef,
ast.arguments,
# Primitives:
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -18,6 +18,7 @@\n \n # Constructor usages:\n ast.FunctionDef,\n+ ast.AsyncFunctionDef,\n ast.arguments,\n \n # Primitives:\n", "issue": "Feature: allow magic numbers in async functions constructors\nWe check that some magic numbers can be used in function constructors like so:\r\n\r\n```python\r\ndef some_function(price, delta=0.1):\r\n return price * delta\r\n```\r\n\r\nBut, we only allow regular functions, not `async` ones: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/numbers.py#L19-L21\r\n\r\nWhat we need to do is:\r\n1. Add `ast.AsyncFunctionDef` to the allowed list\r\n2. Write a unit test for it: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_general/test_magic_numbers.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}]} | 1,289 | 109 |
gh_patches_debug_21827 | rasdani/github-patches | git_diff | iterative__dvc-151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unconfigured git error
Git repository might be created but git might be not configured.
The results of this failure:
1. .gitignore file was not created.
2. change was not committed
```
c:\src\tmp>dvc init
Directories data/, .cache/ and .state/ were created
Error. Git command error (git commit -m DVC init. data dir data, cache dir .cache, state dir .state):
*** Please tell me who you are.
Run
git config --global user.email "[email protected]"
git config --global user.name "Your Name"
to set your account's default identity.
Omit --global to set the identity only in this repository.
fatal: empty ident name (for <(NULL)>) not allowed
```
</issue>
<code>
[start of dvc/git_wrapper.py]
1 import os
2
3 from dvc.logger import Logger
4 from dvc.config import Config
5 from dvc.executor import Executor, ExecutorError
6 from dvc.system import System
7
8
9 class GitWrapperI(object):
10 COMMIT_LEN = 7
11
12 def __init__(self, git_dir=None, commit=None):
13 self._git_dir = git_dir
14 self._commit = commit
15
16 @property
17 def git_dir(self):
18 return self._git_dir
19
20 @property
21 def lock_file(self):
22 return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock')
23
24 @property
25 def git_dir_abs(self):
26 return System.realpath(self.git_dir)
27
28 @property
29 def curr_dir_abs(self):
30 return os.path.abspath(os.curdir)
31
32 @property
33 def curr_commit(self):
34 return self._commit
35
36 def is_ready_to_go(self):
37 return True
38
39 @staticmethod
40 def git_file_statuses():
41 Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain')
42 code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain'])
43 if code != 0:
44 raise ExecutorError('[dvc-git] File status command error - {}'.format(err))
45 Logger.debug('[dvc-git] Getting file statuses. Success.')
46
47 return GitWrapper.parse_porcelain_files(out)
48
49 @staticmethod
50 def git_path_to_system_path(path):
51 if os.name == 'nt':
52 return path.replace('/', '\\')
53 return path
54
55 @staticmethod
56 def parse_porcelain_files(out):
57 result = []
58 if len(out) > 0:
59 lines = out.split('\n')
60 for line in lines:
61 status = line[:2]
62 file = GitWrapperI.git_path_to_system_path(line[3:])
63 result.append((status, file))
64 return result
65
66 def abs_paths_to_dvc(self, files):
67 result = []
68 for file in files:
69 result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs))
70
71 return result
72
73 def commit_all_changes_and_log_status(self, message):
74 pass
75
76
77 class GitWrapper(GitWrapperI):
78 def __init__(self):
79 super(GitWrapper, self).__init__()
80
81 def is_ready_to_go(self):
82 statuses = self.git_file_statuses()
83 if len(statuses) > 0:
84 Logger.error('[dvc-git] Commit all changed files before running reproducible command. Changed files:')
85 for status, file in statuses:
86 Logger.error("{} {}".format(status, file))
87 return False
88
89 return True
90
91 @property
92 def curr_dir_dvc(self):
93 return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs)
94
95 @property
96 def git_dir(self):
97 if self._git_dir:
98 return self._git_dir
99
100 try:
101 Logger.debug('[dvc-git] Getting git directory. Command: git rev-parse --show-toplevel')
102 code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel'])
103
104 if code != 0:
105 raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err))
106 Logger.debug('[dvc-git] Getting git directory. Success.')
107
108 self._git_dir = out
109 return self._git_dir
110 except ExecutorError:
111 raise
112 except Exception as e:
113 raise ExecutorError('Unable to run git command: {}'.format(e))
114 pass
115
116 @property
117 def curr_commit(self):
118 Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD')
119
120 code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD'])
121 if code != 0:
122 raise ExecutorError('[dvc-git] Commit command error - {}'.format(err))
123 Logger.debug('[dvc-git] Getting current git commit. Success.')
124 return out
125
126 @staticmethod
127 def commit_all_changes(message):
128 Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format(
129 'git add --all', 'git status --porcelain', 'git commit -m'))
130
131 Executor.exec_cmd_only_success(['git', 'add', '--all'])
132 out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain'])
133 Executor.exec_cmd_only_success(['git', 'commit', '-m', message])
134 Logger.debug('[dvc-git] Commit all changes. Success.')
135
136 return GitWrapper.parse_porcelain_files(out_status)
137
138 def commit_all_changes_and_log_status(self, message):
139 statuses = self.commit_all_changes(message)
140 Logger.debug('[dvc-git] A new commit {} was made in the current branch. Added files:'.format(
141 self.curr_commit))
142 for status, file in statuses:
143 Logger.debug('[dvc-git]\t{} {}'.format(status, file))
144 pass
145
146 @staticmethod
147 def abs_paths_to_relative(files):
148 cur_dir = System.realpath(os.curdir)
149
150 result = []
151 for file in files:
152 result.append(os.path.relpath(System.realpath(file), cur_dir))
153
154 return result
155
156 def dvc_paths_to_abs(self, files):
157 results = []
158
159 for file in files:
160 results.append(os.path.join(self.git_dir_abs, file))
161
162 return results
163
164 def were_files_changed(self, code_dependencies, path_factory, changed_files):
165 code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies)
166 code_files_set = set([path_factory.path(x).dvc for x in code_files])
167 for changed_file in changed_files:
168 if changed_file in code_files_set:
169 return True
170
171 for dir in code_dirs:
172 if changed_file.startswith(dir):
173 return True
174
175 return False
176
177 @staticmethod
178 def get_changed_files(target_commit):
179 Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format(
180 target_commit))
181
182 changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit])
183 changed_files = changed_files_str.strip('"').split('\n')
184
185 Logger.debug('[dvc-git] Identify changes. Success. Changed files: {}'.format(
186 ', '.join(changed_files)))
187 return changed_files
188
189 @staticmethod
190 def get_target_commit(file):
191 try:
192 commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:"%h"', file])
193 return commit.strip('"')
194 except ExecutorError:
195 return None
196
197 def separate_dependency_files_and_dirs(self, code_dependencies):
198 code_files = []
199 code_dirs = []
200
201 code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies)
202 for code in code_dependencies_abs:
203 if os.path.isdir(code):
204 code_dirs.append(code)
205 else:
206 code_files.append(code)
207
208 return code_files, code_dirs
209
[end of dvc/git_wrapper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/git_wrapper.py b/dvc/git_wrapper.py
--- a/dvc/git_wrapper.py
+++ b/dvc/git_wrapper.py
@@ -46,6 +46,15 @@
return GitWrapper.parse_porcelain_files(out)
+ @staticmethod
+ def git_config_get(name):
+ code, out, err = Executor.exec_cmd(['git', 'config', '--get', name])
+ Logger.debug('[dvc-git] "git config --get {}": code({}), out({}), err({})'.format(
+ name, code, out, err))
+ if code != 0:
+ return None
+ return out
+
@staticmethod
def git_path_to_system_path(path):
if os.name == 'nt':
@@ -86,6 +95,17 @@
Logger.error("{} {}".format(status, file))
return False
+ # Sanity check to make sure we will be able to commit
+ name = self.git_config_get('user.name')
+ if name == None:
+ Logger.error('[dvc-git] Please setup user.name in git config')
+ return False
+
+ email = self.git_config_get('user.email')
+ if email == None:
+ Logger.error('[dvc-git] Please setup user.email in git config')
+ return False
+
return True
@property
| {"golden_diff": "diff --git a/dvc/git_wrapper.py b/dvc/git_wrapper.py\n--- a/dvc/git_wrapper.py\n+++ b/dvc/git_wrapper.py\n@@ -46,6 +46,15 @@\n \n return GitWrapper.parse_porcelain_files(out)\n \n+ @staticmethod\n+ def git_config_get(name):\n+ code, out, err = Executor.exec_cmd(['git', 'config', '--get', name])\n+ Logger.debug('[dvc-git] \"git config --get {}\": code({}), out({}), err({})'.format(\n+ name, code, out, err))\n+ if code != 0:\n+ return None\n+ return out\n+\n @staticmethod\n def git_path_to_system_path(path):\n if os.name == 'nt':\n@@ -86,6 +95,17 @@\n Logger.error(\"{} {}\".format(status, file))\n return False\n \n+ # Sanity check to make sure we will be able to commit\n+ name = self.git_config_get('user.name')\n+ if name == None:\n+ Logger.error('[dvc-git] Please setup user.name in git config')\n+ return False\n+\n+ email = self.git_config_get('user.email')\n+ if email == None:\n+ Logger.error('[dvc-git] Please setup user.email in git config')\n+ return False\n+\n return True\n \n @property\n", "issue": "Unconfigured git error\nGit repository might be created but git might be not configured.\r\nThe results of this failure:\r\n1. .gitignore file was not created.\r\n2. change was not committed\r\n\r\n```\r\nc:\\src\\tmp>dvc init\r\nDirectories data/, .cache/ and .state/ were created\r\nError. Git command error (git commit -m DVC init. data dir data, cache dir .cache, state dir .state):\r\n*** Please tell me who you are.\r\n\r\nRun\r\n\r\n git config --global user.email \"[email protected]\"\r\n git config --global user.name \"Your Name\"\r\n\r\nto set your account's default identity.\r\nOmit --global to set the identity only in this repository.\r\n\r\nfatal: empty ident name (for <(NULL)>) not allowed\r\n```\n", "before_files": [{"content": "import os\n\nfrom dvc.logger import Logger\nfrom dvc.config import Config\nfrom dvc.executor import Executor, ExecutorError\nfrom dvc.system import System\n\n\nclass GitWrapperI(object):\n COMMIT_LEN = 7\n\n def __init__(self, git_dir=None, commit=None):\n self._git_dir = git_dir\n self._commit = commit\n\n @property\n def git_dir(self):\n return self._git_dir\n\n @property\n def lock_file(self):\n return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock')\n\n @property\n def git_dir_abs(self):\n return System.realpath(self.git_dir)\n\n @property\n def curr_dir_abs(self):\n return os.path.abspath(os.curdir)\n\n @property\n def curr_commit(self):\n return self._commit\n\n def is_ready_to_go(self):\n return True\n\n @staticmethod\n def git_file_statuses():\n Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain')\n code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain'])\n if code != 0:\n raise ExecutorError('[dvc-git] File status command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting file statuses. 
Success.')\n\n return GitWrapper.parse_porcelain_files(out)\n\n @staticmethod\n def git_path_to_system_path(path):\n if os.name == 'nt':\n return path.replace('/', '\\\\')\n return path\n\n @staticmethod\n def parse_porcelain_files(out):\n result = []\n if len(out) > 0:\n lines = out.split('\\n')\n for line in lines:\n status = line[:2]\n file = GitWrapperI.git_path_to_system_path(line[3:])\n result.append((status, file))\n return result\n\n def abs_paths_to_dvc(self, files):\n result = []\n for file in files:\n result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs))\n\n return result\n\n def commit_all_changes_and_log_status(self, message):\n pass\n\n\nclass GitWrapper(GitWrapperI):\n def __init__(self):\n super(GitWrapper, self).__init__()\n\n def is_ready_to_go(self):\n statuses = self.git_file_statuses()\n if len(statuses) > 0:\n Logger.error('[dvc-git] Commit all changed files before running reproducible command. Changed files:')\n for status, file in statuses:\n Logger.error(\"{} {}\".format(status, file))\n return False\n\n return True\n\n @property\n def curr_dir_dvc(self):\n return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs)\n\n @property\n def git_dir(self):\n if self._git_dir:\n return self._git_dir\n\n try:\n Logger.debug('[dvc-git] Getting git directory. Command: git rev-parse --show-toplevel')\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel'])\n\n if code != 0:\n raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting git directory. Success.')\n\n self._git_dir = out\n return self._git_dir\n except ExecutorError:\n raise\n except Exception as e:\n raise ExecutorError('Unable to run git command: {}'.format(e))\n pass\n\n @property\n def curr_commit(self):\n Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD')\n\n code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD'])\n if code != 0:\n raise ExecutorError('[dvc-git] Commit command error - {}'.format(err))\n Logger.debug('[dvc-git] Getting current git commit. Success.')\n return out\n\n @staticmethod\n def commit_all_changes(message):\n Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format(\n 'git add --all', 'git status --porcelain', 'git commit -m'))\n\n Executor.exec_cmd_only_success(['git', 'add', '--all'])\n out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain'])\n Executor.exec_cmd_only_success(['git', 'commit', '-m', message])\n Logger.debug('[dvc-git] Commit all changes. Success.')\n\n return GitWrapper.parse_porcelain_files(out_status)\n\n def commit_all_changes_and_log_status(self, message):\n statuses = self.commit_all_changes(message)\n Logger.debug('[dvc-git] A new commit {} was made in the current branch. 
Added files:'.format(\n self.curr_commit))\n for status, file in statuses:\n Logger.debug('[dvc-git]\\t{} {}'.format(status, file))\n pass\n\n @staticmethod\n def abs_paths_to_relative(files):\n cur_dir = System.realpath(os.curdir)\n\n result = []\n for file in files:\n result.append(os.path.relpath(System.realpath(file), cur_dir))\n\n return result\n\n def dvc_paths_to_abs(self, files):\n results = []\n\n for file in files:\n results.append(os.path.join(self.git_dir_abs, file))\n\n return results\n\n def were_files_changed(self, code_dependencies, path_factory, changed_files):\n code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies)\n code_files_set = set([path_factory.path(x).dvc for x in code_files])\n for changed_file in changed_files:\n if changed_file in code_files_set:\n return True\n\n for dir in code_dirs:\n if changed_file.startswith(dir):\n return True\n\n return False\n\n @staticmethod\n def get_changed_files(target_commit):\n Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format(\n target_commit))\n\n changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit])\n changed_files = changed_files_str.strip('\"').split('\\n')\n\n Logger.debug('[dvc-git] Identify changes. Success. Changed files: {}'.format(\n ', '.join(changed_files)))\n return changed_files\n\n @staticmethod\n def get_target_commit(file):\n try:\n commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:\"%h\"', file])\n return commit.strip('\"')\n except ExecutorError:\n return None\n\n def separate_dependency_files_and_dirs(self, code_dependencies):\n code_files = []\n code_dirs = []\n\n code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies)\n for code in code_dependencies_abs:\n if os.path.isdir(code):\n code_dirs.append(code)\n else:\n code_files.append(code)\n\n return code_files, code_dirs\n", "path": "dvc/git_wrapper.py"}]} | 2,727 | 304 |
gh_patches_debug_19879 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation of ERGAS
## 📚 Documentation
It seems to me that the documentation of ERGAS are not consistent with the code. In the documentation the ratio h/l is multiplied with the rest, in the code it is a divisor. I think the confusion originates from the paper that is marked as the source. In the paper the ratio h/l < 1. The way the documentation states it, h/l should be > 1. Page 161 seems to give some insights on that.
The bug regarding these incosistencies was fixed in april, but now the documentation is not so clear and not consistent with the literature.
Thanks!
</issue>
<code>
[start of src/torchmetrics/image/ergas.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, List, Optional, Sequence, Union
16
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update
21 from torchmetrics.metric import Metric
22 from torchmetrics.utilities import rank_zero_warn
23 from torchmetrics.utilities.data import dim_zero_cat
24 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE
25 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE
26
27 if not _MATPLOTLIB_AVAILABLE:
28 __doctest_skip__ = ["ErrorRelativeGlobalDimensionlessSynthesis.plot"]
29
30
31 class ErrorRelativeGlobalDimensionlessSynthesis(Metric):
32 r"""Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.
33
34 This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each
35 band of the result image. It is defined as:
36
37 .. math::
38 ERGAS = 100 \cdot \frac{h}{l} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}}
39
40 where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,
41 often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`
42 is the root mean square error of the k-th band between low and high resolution images, and :math:`\\mu_k` is the
43 mean value of the k-th band of the reference image.
44
45 As input to ``forward`` and ``update`` the metric accepts the following input
46
47 - ``preds`` (:class:`~torch.Tensor`): Predictions from model
48 - ``target`` (:class:`~torch.Tensor`): Ground truth values
49
50 As output of `forward` and `compute` the metric returns the following output
51
52 - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS
53 value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample
54
55 Args:
56 ratio: ratio of high resolution to low resolution.
57 reduction: a method to reduce metric score over labels.
58
59 - ``'elementwise_mean'``: takes the mean (default)
60 - ``'sum'``: takes the sum
61 - ``'none'`` or ``None``: no reduction will be applied
62
63 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
64
65 Example:
66 >>> import torch
67 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
68 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
69 >>> target = preds * 0.75
70 >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()
71 >>> torch.round(ergas(preds, target))
72 tensor(10.)
73
74 """
75
76 higher_is_better: bool = False
77 is_differentiable: bool = True
78 full_state_update: bool = False
79 plot_lower_bound: float = 0.0
80
81 preds: List[Tensor]
82 target: List[Tensor]
83
84 def __init__(
85 self,
86 ratio: float = 4,
87 reduction: Literal["elementwise_mean", "sum", "none", None] = "elementwise_mean",
88 **kwargs: Any,
89 ) -> None:
90 super().__init__(**kwargs)
91 rank_zero_warn(
92 "Metric `UniversalImageQualityIndex` will save all targets and"
93 " predictions in buffer. For large datasets this may lead"
94 " to large memory footprint."
95 )
96
97 self.add_state("preds", default=[], dist_reduce_fx="cat")
98 self.add_state("target", default=[], dist_reduce_fx="cat")
99 self.ratio = ratio
100 self.reduction = reduction
101
102 def update(self, preds: Tensor, target: Tensor) -> None:
103 """Update state with predictions and targets."""
104 preds, target = _ergas_update(preds, target)
105 self.preds.append(preds)
106 self.target.append(target)
107
108 def compute(self) -> Tensor:
109 """Compute explained variance over state."""
110 preds = dim_zero_cat(self.preds)
111 target = dim_zero_cat(self.target)
112 return _ergas_compute(preds, target, self.ratio, self.reduction)
113
114 def plot(
115 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None
116 ) -> _PLOT_OUT_TYPE:
117 """Plot a single or multiple values from the metric.
118
119 Args:
120 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.
121 If no value is provided, will automatically call `metric.compute` and plot that result.
122 ax: An matplotlib axis object. If provided will add plot to that axis
123
124 Returns:
125 Figure and Axes object
126
127 Raises:
128 ModuleNotFoundError:
129 If `matplotlib` is not installed
130
131 .. plot::
132 :scale: 75
133
134 >>> # Example plotting a single value
135 >>> import torch
136 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
137 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
138 >>> target = preds * 0.75
139 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
140 >>> metric.update(preds, target)
141 >>> fig_, ax_ = metric.plot()
142
143 .. plot::
144 :scale: 75
145
146 >>> # Example plotting multiple values
147 >>> import torch
148 >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis
149 >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))
150 >>> target = preds * 0.75
151 >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()
152 >>> values = [ ]
153 >>> for _ in range(10):
154 ... values.append(metric(preds, target))
155 >>> fig_, ax_ = metric.plot(values)
156
157 """
158 return self._plot(val, ax)
159
[end of src/torchmetrics/image/ergas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py
--- a/src/torchmetrics/image/ergas.py
+++ b/src/torchmetrics/image/ergas.py
@@ -35,12 +35,11 @@
band of the result image. It is defined as:
.. math::
- ERGAS = 100 \cdot \frac{h}{l} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}}
+ ERGAS = \frac{100}{r} \cdot \sqrt{\frac{1}{N} \sum_{k=1}^{N} \frac{RMSE(B_k)^2}{\mu_k^2}}
- where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,
- often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`
- is the root mean square error of the k-th band between low and high resolution images, and :math:`\\mu_k` is the
- mean value of the k-th band of the reference image.
+ where :math:`r=h/l` denote the ratio in spatial resolution (pixel size) between the high and low resolution images.
+ :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` is the root mean square error of the k-th band between
+ low and high resolution images, and :math:`\\mu_k` is the mean value of the k-th band of the reference image.
As input to ``forward`` and ``update`` the metric accepts the following input
| {"golden_diff": "diff --git a/src/torchmetrics/image/ergas.py b/src/torchmetrics/image/ergas.py\n--- a/src/torchmetrics/image/ergas.py\n+++ b/src/torchmetrics/image/ergas.py\n@@ -35,12 +35,11 @@\n band of the result image. It is defined as:\n \n .. math::\n- ERGAS = 100 \\cdot \\frac{h}{l} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n+ ERGAS = \\frac{100}{r} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n \n- where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,\n- often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`\n- is the root mean square error of the k-th band between low and high resolution images, and :math:`\\\\mu_k` is the\n- mean value of the k-th band of the reference image.\n+ where :math:`r=h/l` denote the ratio in spatial resolution (pixel size) between the high and low resolution images.\n+ :math:`N` is the number of spectral bands, :math:`RMSE(B_k)` is the root mean square error of the k-th band between\n+ low and high resolution images, and :math:`\\\\mu_k` is the mean value of the k-th band of the reference image.\n \n As input to ``forward`` and ``update`` the metric accepts the following input\n", "issue": "Documentation of ERGAS\n## \ud83d\udcda Documentation\r\n\r\nIt seems to me that the documentation of ERGAS are not consistent with the code. In the documentation the ratio h/l is multiplied with the rest, in the code it is a divisor. I think the confusion originates from the paper that is marked as the source. In the paper the ratio h/l < 1. The way the documentation states it, h/l should be > 1. Page 161 seems to give some insights on that.\r\nThe bug regarding these incosistencies was fixed in april, but now the documentation is not so clear and not consistent with the literature. \r\n\r\nThanks!\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, List, Optional, Sequence, Union\n\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.image.ergas import _ergas_compute, _ergas_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.data import dim_zero_cat\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"ErrorRelativeGlobalDimensionlessSynthesis.plot\"]\n\n\nclass ErrorRelativeGlobalDimensionlessSynthesis(Metric):\n r\"\"\"Calculate the `Error relative global dimensionless synthesis`_ (ERGAS) metric.\n\n This metric is used to calculate the accuracy of Pan sharpened image considering normalized average error of each\n band of the result image. It is defined as:\n\n .. 
math::\n ERGAS = 100 \\cdot \\frac{h}{l} \\cdot \\sqrt{\\frac{1}{N} \\sum_{k=1}^{N} \\frac{RMSE(B_k)^2}{\\mu_k^2}}\n\n where :math:`h` and :math:`l` denote the spatial resolution (pixel size) of the high and low resolution images,\n often shorted to the ratio between them :math:`r=h/l`. :math:`N` is the number of spectral bands, :math:`RMSE(B_k)`\n is the root mean square error of the k-th band between low and high resolution images, and :math:`\\\\mu_k` is the\n mean value of the k-th band of the reference image.\n\n As input to ``forward`` and ``update`` the metric accepts the following input\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` (:class:`~torch.Tensor`): Ground truth values\n\n As output of `forward` and `compute` the metric returns the following output\n\n - ``ergas`` (:class:`~torch.Tensor`): if ``reduction!='none'`` returns float scalar tensor with average ERGAS\n value over sample else returns tensor of shape ``(N,)`` with ERGAS values per sample\n\n Args:\n ratio: ratio of high resolution to low resolution.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> ergas = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> torch.round(ergas(preds, target))\n tensor(10.)\n\n \"\"\"\n\n higher_is_better: bool = False\n is_differentiable: bool = True\n full_state_update: bool = False\n plot_lower_bound: float = 0.0\n\n preds: List[Tensor]\n target: List[Tensor]\n\n def __init__(\n self,\n ratio: float = 4,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n rank_zero_warn(\n \"Metric `UniversalImageQualityIndex` will save all targets and\"\n \" predictions in buffer. For large datasets this may lead\"\n \" to large memory footprint.\"\n )\n\n self.add_state(\"preds\", default=[], dist_reduce_fx=\"cat\")\n self.add_state(\"target\", default=[], dist_reduce_fx=\"cat\")\n self.ratio = ratio\n self.reduction = reduction\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n preds, target = _ergas_update(preds, target)\n self.preds.append(preds)\n self.target.append(target)\n\n def compute(self) -> Tensor:\n \"\"\"Compute explained variance over state.\"\"\"\n preds = dim_zero_cat(self.preds)\n target = dim_zero_cat(self.target)\n return _ergas_compute(preds, target, self.ratio, self.reduction)\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> metric.update(preds, target)\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.image import ErrorRelativeGlobalDimensionlessSynthesis\n >>> preds = torch.rand([16, 1, 16, 16], generator=torch.manual_seed(42))\n >>> target = preds * 0.75\n >>> metric = ErrorRelativeGlobalDimensionlessSynthesis()\n >>> values = [ ]\n >>> for _ in range(10):\n ... values.append(metric(preds, target))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/image/ergas.py"}]} | 2,582 | 412 |
gh_patches_debug_21157 | rasdani/github-patches | git_diff | ipython__ipython-11382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
async-detection: nonlocal does not raise syntaxerror
See https://github.com/ipython/ipython/issues/11303#issuecomment-421297197
```
In [1]: x = 1
...: def f():
...: nonlocal x
...: x = 10000
```
Should raise but does not.
It's minor, but may be good to fix as behavior is likely undefined.
async-detection: nonlocal does not raise syntaxerror
See https://github.com/ipython/ipython/issues/11303#issuecomment-421297197
```
In [1]: x = 1
...: def f():
...: nonlocal x
...: x = 10000
```
Should raise but does not.
It's minor, but may be good to fix as behavior is likely undefined.
</issue>
<code>
[start of IPython/core/async_helpers.py]
1 """
2 Async helper function that are invalid syntax on Python 3.5 and below.
3
4 This code is best effort, and may have edge cases not behaving as expected. In
5 particular it contain a number of heuristics to detect whether code is
6 effectively async and need to run in an event loop or not.
7
8 Some constructs (like top-level `return`, or `yield`) are taken care of
9 explicitly to actually raise a SyntaxError and stay as close as possible to
10 Python semantics.
11 """
12
13
14 import ast
15 import sys
16 from textwrap import dedent, indent
17
18
19 class _AsyncIORunner:
20
21 def __call__(self, coro):
22 """
23 Handler for asyncio autoawait
24 """
25 import asyncio
26
27 return asyncio.get_event_loop().run_until_complete(coro)
28
29 def __str__(self):
30 return 'asyncio'
31
32 _asyncio_runner = _AsyncIORunner()
33
34
35 def _curio_runner(coroutine):
36 """
37 handler for curio autoawait
38 """
39 import curio
40
41 return curio.run(coroutine)
42
43
44 def _trio_runner(async_fn):
45 import trio
46
47 async def loc(coro):
48 """
49 We need the dummy no-op async def to protect from
50 trio's internal. See https://github.com/python-trio/trio/issues/89
51 """
52 return await coro
53
54 return trio.run(loc, async_fn)
55
56
57 def _pseudo_sync_runner(coro):
58 """
59 A runner that does not really allow async execution, and just advance the coroutine.
60
61 See discussion in https://github.com/python-trio/trio/issues/608,
62
63 Credit to Nathaniel Smith
64
65 """
66 try:
67 coro.send(None)
68 except StopIteration as exc:
69 return exc.value
70 else:
71 # TODO: do not raise but return an execution result with the right info.
72 raise RuntimeError(
73 "{coro_name!r} needs a real async loop".format(coro_name=coro.__name__)
74 )
75
76
77 def _asyncify(code: str) -> str:
78 """wrap code in async def definition.
79
80 And setup a bit of context to run it later.
81 """
82 res = dedent(
83 """
84 async def __wrapper__():
85 try:
86 {usercode}
87 finally:
88 locals()
89 """
90 ).format(usercode=indent(code, " " * 8))
91 return res
92
93
94 class _AsyncSyntaxErrorVisitor(ast.NodeVisitor):
95 """
96 Find syntax errors that would be an error in an async repl, but because
97 the implementation involves wrapping the repl in an async function, it
98 is erroneously allowed (e.g. yield or return at the top level)
99 """
100
101 def generic_visit(self, node):
102 func_types = (ast.FunctionDef, ast.AsyncFunctionDef)
103 invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)
104
105 if isinstance(node, func_types):
106 return # Don't recurse into functions
107 elif isinstance(node, invalid_types):
108 raise SyntaxError()
109 else:
110 super().generic_visit(node)
111
112
113 def _async_parse_cell(cell: str) -> ast.AST:
114 """
115 This is a compatibility shim for pre-3.7 when async outside of a function
116 is a syntax error at the parse stage.
117
118 It will return an abstract syntax tree parsed as if async and await outside
119 of a function were not a syntax error.
120 """
121 if sys.version_info < (3, 7):
122 # Prior to 3.7 you need to asyncify before parse
123 wrapped_parse_tree = ast.parse(_asyncify(cell))
124 return wrapped_parse_tree.body[0].body[0]
125 else:
126 return ast.parse(cell)
127
128
129 def _should_be_async(cell: str) -> bool:
130 """Detect if a block of code need to be wrapped in an `async def`
131
132 Attempt to parse the block of code, it it compile we're fine.
133 Otherwise we wrap if and try to compile.
134
135 If it works, assume it should be async. Otherwise Return False.
136
137 Not handled yet: If the block of code has a return statement as the top
138 level, it will be seen as async. This is a know limitation.
139 """
140
141 try:
142 # we can't limit ourself to ast.parse, as it __accepts__ to parse on
143 # 3.7+, but just does not _compile_
144 compile(cell, "<>", "exec")
145 return False
146 except SyntaxError:
147 try:
148 parse_tree = _async_parse_cell(cell)
149
150 # Raise a SyntaxError if there are top-level return or yields
151 v = _AsyncSyntaxErrorVisitor()
152 v.visit(parse_tree)
153
154 except SyntaxError:
155 return False
156 return True
157 return False
158
[end of IPython/core/async_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/core/async_helpers.py b/IPython/core/async_helpers.py
--- a/IPython/core/async_helpers.py
+++ b/IPython/core/async_helpers.py
@@ -97,14 +97,22 @@
the implementation involves wrapping the repl in an async function, it
is erroneously allowed (e.g. yield or return at the top level)
"""
+ def __init__(self):
+ self.depth = 0
+ super().__init__()
def generic_visit(self, node):
func_types = (ast.FunctionDef, ast.AsyncFunctionDef)
- invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)
-
- if isinstance(node, func_types):
- return # Don't recurse into functions
- elif isinstance(node, invalid_types):
+ invalid_types_by_depth = {
+ 0: (ast.Return, ast.Yield, ast.YieldFrom),
+ 1: (ast.Nonlocal,)
+ }
+
+ should_traverse = self.depth < max(invalid_types_by_depth.keys())
+ if isinstance(node, func_types) and should_traverse:
+ self.depth += 1
+ super().generic_visit(node)
+ elif isinstance(node, invalid_types_by_depth[self.depth]):
raise SyntaxError()
else:
super().generic_visit(node)
| {"golden_diff": "diff --git a/IPython/core/async_helpers.py b/IPython/core/async_helpers.py\n--- a/IPython/core/async_helpers.py\n+++ b/IPython/core/async_helpers.py\n@@ -97,14 +97,22 @@\n the implementation involves wrapping the repl in an async function, it\n is erroneously allowed (e.g. yield or return at the top level)\n \"\"\"\n+ def __init__(self):\n+ self.depth = 0\n+ super().__init__()\n \n def generic_visit(self, node):\n func_types = (ast.FunctionDef, ast.AsyncFunctionDef)\n- invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)\n-\n- if isinstance(node, func_types):\n- return # Don't recurse into functions\n- elif isinstance(node, invalid_types):\n+ invalid_types_by_depth = {\n+ 0: (ast.Return, ast.Yield, ast.YieldFrom),\n+ 1: (ast.Nonlocal,)\n+ }\n+\n+ should_traverse = self.depth < max(invalid_types_by_depth.keys())\n+ if isinstance(node, func_types) and should_traverse:\n+ self.depth += 1\n+ super().generic_visit(node)\n+ elif isinstance(node, invalid_types_by_depth[self.depth]):\n raise SyntaxError()\n else:\n super().generic_visit(node)\n", "issue": "async-detection: nonlocal does not raise syntaxerror\nSee https://github.com/ipython/ipython/issues/11303#issuecomment-421297197\r\n\r\n```\r\nIn [1]: x = 1\r\n ...: def f():\r\n ...: nonlocal x\r\n ...: x = 10000\r\n```\r\nShould raise but does not.\r\n\r\nIt's minor, but may be good to fix as behavior is likely undefined.\nasync-detection: nonlocal does not raise syntaxerror\nSee https://github.com/ipython/ipython/issues/11303#issuecomment-421297197\r\n\r\n```\r\nIn [1]: x = 1\r\n ...: def f():\r\n ...: nonlocal x\r\n ...: x = 10000\r\n```\r\nShould raise but does not.\r\n\r\nIt's minor, but may be good to fix as behavior is likely undefined.\n", "before_files": [{"content": "\"\"\"\nAsync helper function that are invalid syntax on Python 3.5 and below.\n\nThis code is best effort, and may have edge cases not behaving as expected. In\nparticular it contain a number of heuristics to detect whether code is\neffectively async and need to run in an event loop or not.\n\nSome constructs (like top-level `return`, or `yield`) are taken care of\nexplicitly to actually raise a SyntaxError and stay as close as possible to\nPython semantics.\n\"\"\"\n\n\nimport ast\nimport sys\nfrom textwrap import dedent, indent\n\n\nclass _AsyncIORunner:\n\n def __call__(self, coro):\n \"\"\"\n Handler for asyncio autoawait\n \"\"\"\n import asyncio\n\n return asyncio.get_event_loop().run_until_complete(coro)\n\n def __str__(self):\n return 'asyncio'\n\n_asyncio_runner = _AsyncIORunner()\n\n\ndef _curio_runner(coroutine):\n \"\"\"\n handler for curio autoawait\n \"\"\"\n import curio\n\n return curio.run(coroutine)\n\n\ndef _trio_runner(async_fn):\n import trio\n\n async def loc(coro):\n \"\"\"\n We need the dummy no-op async def to protect from\n trio's internal. 
See https://github.com/python-trio/trio/issues/89\n \"\"\"\n return await coro\n\n return trio.run(loc, async_fn)\n\n\ndef _pseudo_sync_runner(coro):\n \"\"\"\n A runner that does not really allow async execution, and just advance the coroutine.\n\n See discussion in https://github.com/python-trio/trio/issues/608,\n\n Credit to Nathaniel Smith\n\n \"\"\"\n try:\n coro.send(None)\n except StopIteration as exc:\n return exc.value\n else:\n # TODO: do not raise but return an execution result with the right info.\n raise RuntimeError(\n \"{coro_name!r} needs a real async loop\".format(coro_name=coro.__name__)\n )\n\n\ndef _asyncify(code: str) -> str:\n \"\"\"wrap code in async def definition.\n\n And setup a bit of context to run it later.\n \"\"\"\n res = dedent(\n \"\"\"\n async def __wrapper__():\n try:\n {usercode}\n finally:\n locals()\n \"\"\"\n ).format(usercode=indent(code, \" \" * 8))\n return res\n\n\nclass _AsyncSyntaxErrorVisitor(ast.NodeVisitor):\n \"\"\"\n Find syntax errors that would be an error in an async repl, but because\n the implementation involves wrapping the repl in an async function, it\n is erroneously allowed (e.g. yield or return at the top level)\n \"\"\"\n\n def generic_visit(self, node):\n func_types = (ast.FunctionDef, ast.AsyncFunctionDef)\n invalid_types = (ast.Return, ast.Yield, ast.YieldFrom)\n\n if isinstance(node, func_types):\n return # Don't recurse into functions\n elif isinstance(node, invalid_types):\n raise SyntaxError()\n else:\n super().generic_visit(node)\n\n\ndef _async_parse_cell(cell: str) -> ast.AST:\n \"\"\"\n This is a compatibility shim for pre-3.7 when async outside of a function\n is a syntax error at the parse stage.\n\n It will return an abstract syntax tree parsed as if async and await outside\n of a function were not a syntax error.\n \"\"\"\n if sys.version_info < (3, 7):\n # Prior to 3.7 you need to asyncify before parse\n wrapped_parse_tree = ast.parse(_asyncify(cell))\n return wrapped_parse_tree.body[0].body[0]\n else:\n return ast.parse(cell)\n\n\ndef _should_be_async(cell: str) -> bool:\n \"\"\"Detect if a block of code need to be wrapped in an `async def`\n\n Attempt to parse the block of code, it it compile we're fine.\n Otherwise we wrap if and try to compile.\n\n If it works, assume it should be async. Otherwise Return False.\n\n Not handled yet: If the block of code has a return statement as the top\n level, it will be seen as async. This is a know limitation.\n \"\"\"\n\n try:\n # we can't limit ourself to ast.parse, as it __accepts__ to parse on\n # 3.7+, but just does not _compile_\n compile(cell, \"<>\", \"exec\")\n return False\n except SyntaxError:\n try:\n parse_tree = _async_parse_cell(cell)\n\n # Raise a SyntaxError if there are top-level return or yields\n v = _AsyncSyntaxErrorVisitor()\n v.visit(parse_tree)\n\n except SyntaxError:\n return False\n return True\n return False\n", "path": "IPython/core/async_helpers.py"}]} | 2,156 | 295 |
gh_patches_debug_32876 | rasdani/github-patches | git_diff | getsentry__sentry-52083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SDK Crash Detection: Set in app
Set all in app for all frames to `false` except for the SDK frames after stripping the event data.
https://github.com/getsentry/sentry/blob/95086b406dec79e6bcef45f299a3e92f727da2c0/src/sentry/utils/sdk_crashes/sdk_crash_detection.py#L58
</issue>
<code>
[start of src/sentry/utils/sdk_crashes/event_stripper.py]
1 from enum import Enum, auto
2 from typing import Any, Dict, Mapping, Optional, Sequence
3
4 from sentry.db.models import NodeData
5 from sentry.utils.safe import get_path
6 from sentry.utils.sdk_crashes.sdk_crash_detector import SDKCrashDetector
7
8
9 class Allow(Enum):
10 def __init__(self, explanation: str = "") -> None:
11 self.explanation = explanation
12
13 """Keeps the event data if it is of type str, int, float, bool."""
14 SIMPLE_TYPE = auto()
15
16 """
17 Doesn't keep the event data no matter the type. This can be used to explicitly
18 specify that data should be removed with an explanation.
19 """
20 NEVER = auto()
21
22 def with_explanation(self, explanation: str) -> "Allow":
23 self.explanation = explanation
24 return self
25
26
27 EVENT_DATA_ALLOWLIST = {
28 "type": Allow.SIMPLE_TYPE,
29 "datetime": Allow.SIMPLE_TYPE,
30 "timestamp": Allow.SIMPLE_TYPE,
31 "platform": Allow.SIMPLE_TYPE,
32 "sdk": {
33 "name": Allow.SIMPLE_TYPE,
34 "version": Allow.SIMPLE_TYPE,
35 "integrations": Allow.NEVER.with_explanation("Users can add their own integrations."),
36 },
37 "exception": {
38 "values": {
39 "stacktrace": {
40 "frames": {
41 "filename": Allow.SIMPLE_TYPE,
42 "function": Allow.SIMPLE_TYPE,
43 "raw_function": Allow.SIMPLE_TYPE,
44 "module": Allow.SIMPLE_TYPE,
45 "abs_path": Allow.SIMPLE_TYPE,
46 "in_app": Allow.SIMPLE_TYPE,
47 "instruction_addr": Allow.SIMPLE_TYPE,
48 "addr_mode": Allow.SIMPLE_TYPE,
49 "symbol": Allow.SIMPLE_TYPE,
50 "symbol_addr": Allow.SIMPLE_TYPE,
51 "image_addr": Allow.SIMPLE_TYPE,
52 "package": Allow.SIMPLE_TYPE,
53 "platform": Allow.SIMPLE_TYPE,
54 }
55 },
56 "value": Allow.NEVER.with_explanation("The exception value could contain PII."),
57 "type": Allow.SIMPLE_TYPE,
58 "mechanism": {
59 "handled": Allow.SIMPLE_TYPE,
60 "type": Allow.SIMPLE_TYPE,
61 "meta": {
62 "signal": {
63 "number": Allow.SIMPLE_TYPE,
64 "code": Allow.SIMPLE_TYPE,
65 "name": Allow.SIMPLE_TYPE,
66 "code_name": Allow.SIMPLE_TYPE,
67 },
68 "mach_exception": {
69 "exception": Allow.SIMPLE_TYPE,
70 "code": Allow.SIMPLE_TYPE,
71 "subcode": Allow.SIMPLE_TYPE,
72 "name": Allow.SIMPLE_TYPE,
73 },
74 },
75 },
76 }
77 },
78 "contexts": {
79 "device": {
80 "family": Allow.SIMPLE_TYPE,
81 "model": Allow.SIMPLE_TYPE,
82 "arch": Allow.SIMPLE_TYPE,
83 },
84 "os": {
85 "name": Allow.SIMPLE_TYPE,
86 "version": Allow.SIMPLE_TYPE,
87 "build": Allow.SIMPLE_TYPE,
88 },
89 },
90 }
91
92
93 def strip_event_data(
94 event_data: NodeData, sdk_crash_detector: SDKCrashDetector
95 ) -> Mapping[str, Any]:
96 new_event_data = _strip_event_data_with_allowlist(event_data, EVENT_DATA_ALLOWLIST)
97
98 if (new_event_data is None) or (new_event_data == {}):
99 return {}
100
101 stripped_frames: Sequence[Mapping[str, Any]] = []
102 frames = get_path(new_event_data, "exception", "values", -1, "stacktrace", "frames")
103
104 if frames is not None:
105 stripped_frames = _strip_frames(frames, sdk_crash_detector)
106 new_event_data["exception"]["values"][0]["stacktrace"]["frames"] = stripped_frames
107
108 return new_event_data
109
110
111 def _strip_event_data_with_allowlist(
112 data: Mapping[str, Any], allowlist: Optional[Mapping[str, Any]]
113 ) -> Optional[Mapping[str, Any]]:
114 """
115 Recursively traverses the data and only keeps values based on the allowlist.
116 """
117 if allowlist is None:
118 return None
119
120 stripped_data: Dict[str, Any] = {}
121 for data_key, data_value in data.items():
122 allowlist_for_data = allowlist.get(data_key)
123 if allowlist_for_data is None:
124 continue
125
126 if isinstance(allowlist_for_data, Allow):
127 allowed = allowlist_for_data
128
129 if allowed is Allow.SIMPLE_TYPE and isinstance(data_value, (str, int, float, bool)):
130 stripped_data[data_key] = data_value
131 else:
132 continue
133
134 elif isinstance(data_value, Mapping):
135 stripped_data[data_key] = _strip_event_data_with_allowlist(
136 data_value, allowlist_for_data
137 )
138 elif isinstance(data_value, Sequence):
139 stripped_data[data_key] = [
140 _strip_event_data_with_allowlist(item, allowlist_for_data) for item in data_value
141 ]
142
143 return stripped_data
144
145
146 def _strip_frames(
147 frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector
148 ) -> Sequence[Mapping[str, Any]]:
149 """
150 Only keep SDK frames or Apple system libraries.
151 We need to adapt this logic once we support other platforms.
152 """
153
154 def is_system_library(frame: Mapping[str, Any]) -> bool:
155 fields_containing_paths = {"package", "module", "abs_path"}
156 system_library_paths = {"/System/Library/", "/usr/lib/system/"}
157
158 for field in fields_containing_paths:
159 for path in system_library_paths:
160 if frame.get(field, "").startswith(path):
161 return True
162
163 return False
164
165 return [
166 frame
167 for frame in frames
168 if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)
169 ]
170
[end of src/sentry/utils/sdk_crashes/event_stripper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/utils/sdk_crashes/event_stripper.py b/src/sentry/utils/sdk_crashes/event_stripper.py
--- a/src/sentry/utils/sdk_crashes/event_stripper.py
+++ b/src/sentry/utils/sdk_crashes/event_stripper.py
@@ -1,5 +1,5 @@
from enum import Enum, auto
-from typing import Any, Dict, Mapping, Optional, Sequence
+from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence
from sentry.db.models import NodeData
from sentry.utils.safe import get_path
@@ -98,11 +98,11 @@
if (new_event_data is None) or (new_event_data == {}):
return {}
- stripped_frames: Sequence[Mapping[str, Any]] = []
frames = get_path(new_event_data, "exception", "values", -1, "stacktrace", "frames")
if frames is not None:
stripped_frames = _strip_frames(frames, sdk_crash_detector)
+
new_event_data["exception"]["values"][0]["stacktrace"]["frames"] = stripped_frames
return new_event_data
@@ -144,7 +144,7 @@
def _strip_frames(
- frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector
+ frames: Sequence[MutableMapping[str, Any]], sdk_crash_detector: SDKCrashDetector
) -> Sequence[Mapping[str, Any]]:
"""
Only keep SDK frames or Apple system libraries.
@@ -162,8 +162,15 @@
return False
+ def strip_frame(frame: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
+ if sdk_crash_detector.is_sdk_frame(frame):
+ frame["in_app"] = True
+ else:
+ frame["in_app"] = False
+ return frame
+
return [
- frame
+ strip_frame(frame)
for frame in frames
if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)
]
| {"golden_diff": "diff --git a/src/sentry/utils/sdk_crashes/event_stripper.py b/src/sentry/utils/sdk_crashes/event_stripper.py\n--- a/src/sentry/utils/sdk_crashes/event_stripper.py\n+++ b/src/sentry/utils/sdk_crashes/event_stripper.py\n@@ -1,5 +1,5 @@\n from enum import Enum, auto\n-from typing import Any, Dict, Mapping, Optional, Sequence\n+from typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence\n \n from sentry.db.models import NodeData\n from sentry.utils.safe import get_path\n@@ -98,11 +98,11 @@\n if (new_event_data is None) or (new_event_data == {}):\n return {}\n \n- stripped_frames: Sequence[Mapping[str, Any]] = []\n frames = get_path(new_event_data, \"exception\", \"values\", -1, \"stacktrace\", \"frames\")\n \n if frames is not None:\n stripped_frames = _strip_frames(frames, sdk_crash_detector)\n+\n new_event_data[\"exception\"][\"values\"][0][\"stacktrace\"][\"frames\"] = stripped_frames\n \n return new_event_data\n@@ -144,7 +144,7 @@\n \n \n def _strip_frames(\n- frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n+ frames: Sequence[MutableMapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n ) -> Sequence[Mapping[str, Any]]:\n \"\"\"\n Only keep SDK frames or Apple system libraries.\n@@ -162,8 +162,15 @@\n \n return False\n \n+ def strip_frame(frame: MutableMapping[str, Any]) -> MutableMapping[str, Any]:\n+ if sdk_crash_detector.is_sdk_frame(frame):\n+ frame[\"in_app\"] = True\n+ else:\n+ frame[\"in_app\"] = False\n+ return frame\n+\n return [\n- frame\n+ strip_frame(frame)\n for frame in frames\n if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)\n ]\n", "issue": "SDK Crash Detection: Set in app\nSet all in app for all frames to `false` except for the SDK frames after stripping the event data. \r\n\r\nhttps://github.com/getsentry/sentry/blob/95086b406dec79e6bcef45f299a3e92f727da2c0/src/sentry/utils/sdk_crashes/sdk_crash_detection.py#L58\n", "before_files": [{"content": "from enum import Enum, auto\nfrom typing import Any, Dict, Mapping, Optional, Sequence\n\nfrom sentry.db.models import NodeData\nfrom sentry.utils.safe import get_path\nfrom sentry.utils.sdk_crashes.sdk_crash_detector import SDKCrashDetector\n\n\nclass Allow(Enum):\n def __init__(self, explanation: str = \"\") -> None:\n self.explanation = explanation\n\n \"\"\"Keeps the event data if it is of type str, int, float, bool.\"\"\"\n SIMPLE_TYPE = auto()\n\n \"\"\"\n Doesn't keep the event data no matter the type. 
This can be used to explicitly\n specify that data should be removed with an explanation.\n \"\"\"\n NEVER = auto()\n\n def with_explanation(self, explanation: str) -> \"Allow\":\n self.explanation = explanation\n return self\n\n\nEVENT_DATA_ALLOWLIST = {\n \"type\": Allow.SIMPLE_TYPE,\n \"datetime\": Allow.SIMPLE_TYPE,\n \"timestamp\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n \"sdk\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"integrations\": Allow.NEVER.with_explanation(\"Users can add their own integrations.\"),\n },\n \"exception\": {\n \"values\": {\n \"stacktrace\": {\n \"frames\": {\n \"filename\": Allow.SIMPLE_TYPE,\n \"function\": Allow.SIMPLE_TYPE,\n \"raw_function\": Allow.SIMPLE_TYPE,\n \"module\": Allow.SIMPLE_TYPE,\n \"abs_path\": Allow.SIMPLE_TYPE,\n \"in_app\": Allow.SIMPLE_TYPE,\n \"instruction_addr\": Allow.SIMPLE_TYPE,\n \"addr_mode\": Allow.SIMPLE_TYPE,\n \"symbol\": Allow.SIMPLE_TYPE,\n \"symbol_addr\": Allow.SIMPLE_TYPE,\n \"image_addr\": Allow.SIMPLE_TYPE,\n \"package\": Allow.SIMPLE_TYPE,\n \"platform\": Allow.SIMPLE_TYPE,\n }\n },\n \"value\": Allow.NEVER.with_explanation(\"The exception value could contain PII.\"),\n \"type\": Allow.SIMPLE_TYPE,\n \"mechanism\": {\n \"handled\": Allow.SIMPLE_TYPE,\n \"type\": Allow.SIMPLE_TYPE,\n \"meta\": {\n \"signal\": {\n \"number\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n \"code_name\": Allow.SIMPLE_TYPE,\n },\n \"mach_exception\": {\n \"exception\": Allow.SIMPLE_TYPE,\n \"code\": Allow.SIMPLE_TYPE,\n \"subcode\": Allow.SIMPLE_TYPE,\n \"name\": Allow.SIMPLE_TYPE,\n },\n },\n },\n }\n },\n \"contexts\": {\n \"device\": {\n \"family\": Allow.SIMPLE_TYPE,\n \"model\": Allow.SIMPLE_TYPE,\n \"arch\": Allow.SIMPLE_TYPE,\n },\n \"os\": {\n \"name\": Allow.SIMPLE_TYPE,\n \"version\": Allow.SIMPLE_TYPE,\n \"build\": Allow.SIMPLE_TYPE,\n },\n },\n}\n\n\ndef strip_event_data(\n event_data: NodeData, sdk_crash_detector: SDKCrashDetector\n) -> Mapping[str, Any]:\n new_event_data = _strip_event_data_with_allowlist(event_data, EVENT_DATA_ALLOWLIST)\n\n if (new_event_data is None) or (new_event_data == {}):\n return {}\n\n stripped_frames: Sequence[Mapping[str, Any]] = []\n frames = get_path(new_event_data, \"exception\", \"values\", -1, \"stacktrace\", \"frames\")\n\n if frames is not None:\n stripped_frames = _strip_frames(frames, sdk_crash_detector)\n new_event_data[\"exception\"][\"values\"][0][\"stacktrace\"][\"frames\"] = stripped_frames\n\n return new_event_data\n\n\ndef _strip_event_data_with_allowlist(\n data: Mapping[str, Any], allowlist: Optional[Mapping[str, Any]]\n) -> Optional[Mapping[str, Any]]:\n \"\"\"\n Recursively traverses the data and only keeps values based on the allowlist.\n \"\"\"\n if allowlist is None:\n return None\n\n stripped_data: Dict[str, Any] = {}\n for data_key, data_value in data.items():\n allowlist_for_data = allowlist.get(data_key)\n if allowlist_for_data is None:\n continue\n\n if isinstance(allowlist_for_data, Allow):\n allowed = allowlist_for_data\n\n if allowed is Allow.SIMPLE_TYPE and isinstance(data_value, (str, int, float, bool)):\n stripped_data[data_key] = data_value\n else:\n continue\n\n elif isinstance(data_value, Mapping):\n stripped_data[data_key] = _strip_event_data_with_allowlist(\n data_value, allowlist_for_data\n )\n elif isinstance(data_value, Sequence):\n stripped_data[data_key] = [\n _strip_event_data_with_allowlist(item, allowlist_for_data) for item in data_value\n ]\n\n return 
stripped_data\n\n\ndef _strip_frames(\n frames: Sequence[Mapping[str, Any]], sdk_crash_detector: SDKCrashDetector\n) -> Sequence[Mapping[str, Any]]:\n \"\"\"\n Only keep SDK frames or Apple system libraries.\n We need to adapt this logic once we support other platforms.\n \"\"\"\n\n def is_system_library(frame: Mapping[str, Any]) -> bool:\n fields_containing_paths = {\"package\", \"module\", \"abs_path\"}\n system_library_paths = {\"/System/Library/\", \"/usr/lib/system/\"}\n\n for field in fields_containing_paths:\n for path in system_library_paths:\n if frame.get(field, \"\").startswith(path):\n return True\n\n return False\n\n return [\n frame\n for frame in frames\n if sdk_crash_detector.is_sdk_frame(frame) or is_system_library(frame)\n ]\n", "path": "src/sentry/utils/sdk_crashes/event_stripper.py"}]} | 2,323 | 448 |
gh_patches_debug_6916 | rasdani/github-patches | git_diff | joke2k__faker-678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NameError: name 'args' is not defined
I'm getting an error when calling the method with *args:
```
fake.email(*args, **kwargs)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'args' is not defined
```
Also, can I use **kwargs to set the domain (e.g. gmail) so that I get [email protected]?
</issue>
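The traceback above comes from typing the documented signature literally: `*args` and `**kwargs` are placeholders, not variables defined in the interactive session. A minimal sketch of the intended call, plus the kind of domain override the reporter is asking about (the `domain` keyword is an assumption here and is not part of the provider shown below):

```python
from faker import Faker

fake = Faker()

# *args / **kwargs in the signature are placeholders, not names that exist in
# a REPL session, hence the NameError in the report.
print(fake.email())  # e.g. [email protected]

# Hypothetical domain override along the lines the reporter asks for; this
# keyword does not exist in the provider source listed below.
# print(fake.email(domain="gmail.com"))  # would give <user_name>@gmail.com
```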
<code>
[start of faker/providers/internet/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3
4 from text_unidecode import unidecode
5
6 from .. import BaseProvider
7
8 from ipaddress import ip_address, ip_network, IPV4LENGTH, IPV6LENGTH
9
10 # from faker.generator import random
11 # from faker.providers.lorem.la import Provider as Lorem
12 from faker.utils.decorators import lowercase, slugify, slugify_unicode
13
14
15 localized = True
16
17
18 class Provider(BaseProvider):
19 safe_email_tlds = ('org', 'com', 'net')
20 free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com')
21 tlds = (
22 'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'
23 )
24
25 uri_pages = (
26 'index', 'home', 'search', 'main', 'post', 'homepage', 'category',
27 'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'
28 )
29 uri_paths = (
30 'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',
31 'tags', 'blog', 'posts', 'list', 'explore'
32 )
33 uri_extensions = (
34 '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',
35 '.asp'
36 )
37
38 user_name_formats = (
39 '{{last_name}}.{{first_name}}',
40 '{{first_name}}.{{last_name}}',
41 '{{first_name}}##',
42 '?{{last_name}}',
43 )
44 email_formats = (
45 '{{user_name}}@{{domain_name}}',
46 '{{user_name}}@{{free_email_domain}}',
47 )
48 url_formats = (
49 'http://www.{{domain_name}}/',
50 'http://{{domain_name}}/',
51 'https://www.{{domain_name}}/',
52 'https://{{domain_name}}/',
53 )
54 uri_formats = (
55 '{{url}}',
56 '{{url}}{{uri_page}}/',
57 '{{url}}{{uri_page}}{{uri_extension}}',
58 '{{url}}{{uri_path}}/{{uri_page}}/',
59 '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}',
60 )
61 image_placeholder_services = (
62 'https://placeholdit.imgix.net/~text'
63 '?txtsize=55&txt={width}x{height}&w={width}&h={height}',
64 'https://www.lorempixel.com/{width}/{height}',
65 'https://dummyimage.com/{width}x{height}',
66 )
67
68 replacements = tuple()
69
70 def _to_ascii(self, string):
71 for search, replace in self.replacements:
72 string = string.replace(search, replace)
73
74 string = unidecode(string)
75 return string
76
77 @lowercase
78 def email(self):
79 pattern = self.random_element(self.email_formats)
80 return "".join(self.generator.parse(pattern).split(" "))
81
82 @lowercase
83 def safe_email(self):
84 return '{}@example.{}'.format(
85 self.user_name(), self.random_element(self.safe_email_tlds)
86 )
87
88 @lowercase
89 def free_email(self):
90 return self.user_name() + '@' + self.free_email_domain()
91
92 @lowercase
93 def company_email(self):
94 return self.user_name() + '@' + self.domain_name()
95
96 @lowercase
97 def free_email_domain(self):
98 return self.random_element(self.free_email_domains)
99
100 @lowercase
101 def ascii_email(self):
102 pattern = self.random_element(self.email_formats)
103 return self._to_ascii(
104 "".join(self.generator.parse(pattern).split(" "))
105 )
106
107 @lowercase
108 def ascii_safe_email(self):
109 return self._to_ascii(
110 self.user_name() +
111 '@example.' +
112 self.random_element(self.safe_email_tlds)
113 )
114
115 @lowercase
116 def ascii_free_email(self):
117 return self._to_ascii(
118 self.user_name() + '@' + self.free_email_domain()
119 )
120
121 @lowercase
122 def ascii_company_email(self):
123 return self._to_ascii(
124 self.user_name() + '@' + self.domain_name()
125 )
126
127 @slugify_unicode
128 def user_name(self):
129 pattern = self.random_element(self.user_name_formats)
130 username = self._to_ascii(
131 self.bothify(self.generator.parse(pattern)).lower()
132 )
133 return username
134
135 @lowercase
136 def domain_name(self, levels=1):
137 """
138 Produce an Internet domain name with the specified number of
139 subdomain levels.
140
141 >>> domain_name()
142 nichols-phillips.com
143 >>> domain_name(2)
144 williamson-hopkins.jackson.com
145 """
146 if levels < 1:
147 raise ValueError("levels must be greater than or equal to 1")
148 if levels == 1:
149 return self.domain_word() + '.' + self.tld()
150 else:
151 return self.domain_word() + '.' + self.domain_name(levels - 1)
152
153 @lowercase
154 @slugify_unicode
155 def domain_word(self,):
156 company = self.generator.format('company')
157 company_elements = company.split(' ')
158 company = self._to_ascii(company_elements.pop(0))
159 return company
160
161 def tld(self):
162 return self.random_element(self.tlds)
163
164 def url(self):
165 pattern = self.random_element(self.url_formats)
166 return self.generator.parse(pattern)
167
168 def ipv4(self, network=False):
169 """Produce a random IPv4 address or network with a valid CIDR"""
170 address = str(ip_address(self.generator.random.randint(
171 0, (2 ** IPV4LENGTH) - 1)))
172 if network:
173 address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))
174 address = str(ip_network(address, strict=False))
175 return address
176
177 def ipv6(self, network=False):
178 """Produce a random IPv6 address or network with a valid CIDR"""
179 address = str(ip_address(self.generator.random.randint(
180 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))
181 if network:
182 address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))
183 address = str(ip_network(address, strict=False))
184 return address
185
186 def mac_address(self):
187 mac = [self.generator.random.randint(0x00, 0xff) for i in range(0, 6)]
188 return ":".join(map(lambda x: "%02x" % x, mac))
189
190 def uri_page(self):
191 return self.random_element(self.uri_pages)
192
193 def uri_path(self, deep=None):
194 deep = deep if deep else self.generator.random.randint(1, 3)
195 return "/".join(
196 [self.random_element(self.uri_paths) for _ in range(0, deep)]
197 )
198
199 def uri_extension(self):
200 return self.random_element(self.uri_extensions)
201
202 def uri(self):
203 pattern = self.random_element(self.uri_formats)
204 return self.generator.parse(pattern)
205
206 @slugify
207 def slug(self, value=None):
208 """Django algorithm"""
209 if value is None:
210 value = self.generator.text(20)
211 return value
212
213 def image_url(self, width=None, height=None):
214 """
215 Returns URL to placeholder image
216 Example: http://placehold.it/640x480
217 """
218 width_ = width or self.random_int(max=1024)
219 height_ = height or self.random_int(max=1024)
220 placeholder_url = self.random_element(self.image_placeholder_services)
221 return placeholder_url.format(width=width_, height=height_)
222
[end of faker/providers/internet/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/internet/__init__.py b/faker/providers/internet/__init__.py
--- a/faker/providers/internet/__init__.py
+++ b/faker/providers/internet/__init__.py
@@ -75,9 +75,13 @@
return string
@lowercase
- def email(self):
- pattern = self.random_element(self.email_formats)
- return "".join(self.generator.parse(pattern).split(" "))
+ def email(self, domain=None):
+ if domain:
+ email = '{0}@{1}'.format(self.user_name(), domain)
+ else:
+ pattern = self.random_element(self.email_formats)
+ email = "".join(self.generator.parse(pattern).split(" "))
+ return email
@lowercase
def safe_email(self):
| {"golden_diff": "diff --git a/faker/providers/internet/__init__.py b/faker/providers/internet/__init__.py\n--- a/faker/providers/internet/__init__.py\n+++ b/faker/providers/internet/__init__.py\n@@ -75,9 +75,13 @@\n return string\n \n @lowercase\n- def email(self):\n- pattern = self.random_element(self.email_formats)\n- return \"\".join(self.generator.parse(pattern).split(\" \"))\n+ def email(self, domain=None):\n+ if domain:\n+ email = '{0}@{1}'.format(self.user_name(), domain)\n+ else:\n+ pattern = self.random_element(self.email_formats)\n+ email = \"\".join(self.generator.parse(pattern).split(\" \"))\n+ return email\n \n @lowercase\n def safe_email(self):\n", "issue": "NameError: name 'args' is not defined\nI'm getting an error using the *args method.\r\n\r\n```\r\nfake.email(*args, **kwargs)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nNameError: name 'args' is not defined\r\n\r\n```\r\nAlso can I change the **kwargs with @gmail to get [email protected]?\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\n\nfrom text_unidecode import unidecode\n\nfrom .. import BaseProvider\n\nfrom ipaddress import ip_address, ip_network, IPV4LENGTH, IPV6LENGTH\n\n# from faker.generator import random\n# from faker.providers.lorem.la import Provider as Lorem\nfrom faker.utils.decorators import lowercase, slugify, slugify_unicode\n\n\nlocalized = True\n\n\nclass Provider(BaseProvider):\n safe_email_tlds = ('org', 'com', 'net')\n free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com')\n tlds = (\n 'com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org'\n )\n\n uri_pages = (\n 'index', 'home', 'search', 'main', 'post', 'homepage', 'category',\n 'register', 'login', 'faq', 'about', 'terms', 'privacy', 'author'\n )\n uri_paths = (\n 'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories',\n 'tags', 'blog', 'posts', 'list', 'explore'\n )\n uri_extensions = (\n '.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp',\n '.asp'\n )\n\n user_name_formats = (\n '{{last_name}}.{{first_name}}',\n '{{first_name}}.{{last_name}}',\n '{{first_name}}##',\n '?{{last_name}}',\n )\n email_formats = (\n '{{user_name}}@{{domain_name}}',\n '{{user_name}}@{{free_email_domain}}',\n )\n url_formats = (\n 'http://www.{{domain_name}}/',\n 'http://{{domain_name}}/',\n 'https://www.{{domain_name}}/',\n 'https://{{domain_name}}/',\n )\n uri_formats = (\n '{{url}}',\n '{{url}}{{uri_page}}/',\n '{{url}}{{uri_page}}{{uri_extension}}',\n '{{url}}{{uri_path}}/{{uri_page}}/',\n '{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}',\n )\n image_placeholder_services = (\n 'https://placeholdit.imgix.net/~text'\n '?txtsize=55&txt={width}x{height}&w={width}&h={height}',\n 'https://www.lorempixel.com/{width}/{height}',\n 'https://dummyimage.com/{width}x{height}',\n )\n\n replacements = tuple()\n\n def _to_ascii(self, string):\n for search, replace in self.replacements:\n string = string.replace(search, replace)\n\n string = unidecode(string)\n return string\n\n @lowercase\n def email(self):\n pattern = self.random_element(self.email_formats)\n return \"\".join(self.generator.parse(pattern).split(\" \"))\n\n @lowercase\n def safe_email(self):\n return '{}@example.{}'.format(\n self.user_name(), self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def free_email(self):\n return self.user_name() + '@' + self.free_email_domain()\n\n @lowercase\n def company_email(self):\n return self.user_name() + '@' + 
self.domain_name()\n\n @lowercase\n def free_email_domain(self):\n return self.random_element(self.free_email_domains)\n\n @lowercase\n def ascii_email(self):\n pattern = self.random_element(self.email_formats)\n return self._to_ascii(\n \"\".join(self.generator.parse(pattern).split(\" \"))\n )\n\n @lowercase\n def ascii_safe_email(self):\n return self._to_ascii(\n self.user_name() +\n '@example.' +\n self.random_element(self.safe_email_tlds)\n )\n\n @lowercase\n def ascii_free_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.free_email_domain()\n )\n\n @lowercase\n def ascii_company_email(self):\n return self._to_ascii(\n self.user_name() + '@' + self.domain_name()\n )\n\n @slugify_unicode\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n username = self._to_ascii(\n self.bothify(self.generator.parse(pattern)).lower()\n )\n return username\n\n @lowercase\n def domain_name(self, levels=1):\n \"\"\"\n Produce an Internet domain name with the specified number of\n subdomain levels.\n\n >>> domain_name()\n nichols-phillips.com\n >>> domain_name(2)\n williamson-hopkins.jackson.com\n \"\"\"\n if levels < 1:\n raise ValueError(\"levels must be greater than or equal to 1\")\n if levels == 1:\n return self.domain_word() + '.' + self.tld()\n else:\n return self.domain_word() + '.' + self.domain_name(levels - 1)\n\n @lowercase\n @slugify_unicode\n def domain_word(self,):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return company\n\n def tld(self):\n return self.random_element(self.tlds)\n\n def url(self):\n pattern = self.random_element(self.url_formats)\n return self.generator.parse(pattern)\n\n def ipv4(self, network=False):\n \"\"\"Produce a random IPv4 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 0, (2 ** IPV4LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV4LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def ipv6(self, network=False):\n \"\"\"Produce a random IPv6 address or network with a valid CIDR\"\"\"\n address = str(ip_address(self.generator.random.randint(\n 2 ** IPV4LENGTH, (2 ** IPV6LENGTH) - 1)))\n if network:\n address += '/' + str(self.generator.random.randint(0, IPV6LENGTH))\n address = str(ip_network(address, strict=False))\n return address\n\n def mac_address(self):\n mac = [self.generator.random.randint(0x00, 0xff) for i in range(0, 6)]\n return \":\".join(map(lambda x: \"%02x\" % x, mac))\n\n def uri_page(self):\n return self.random_element(self.uri_pages)\n\n def uri_path(self, deep=None):\n deep = deep if deep else self.generator.random.randint(1, 3)\n return \"/\".join(\n [self.random_element(self.uri_paths) for _ in range(0, deep)]\n )\n\n def uri_extension(self):\n return self.random_element(self.uri_extensions)\n\n def uri(self):\n pattern = self.random_element(self.uri_formats)\n return self.generator.parse(pattern)\n\n @slugify\n def slug(self, value=None):\n \"\"\"Django algorithm\"\"\"\n if value is None:\n value = self.generator.text(20)\n return value\n\n def image_url(self, width=None, height=None):\n \"\"\"\n Returns URL to placeholder image\n Example: http://placehold.it/640x480\n \"\"\"\n width_ = width or self.random_int(max=1024)\n height_ = height or self.random_int(max=1024)\n placeholder_url = self.random_element(self.image_placeholder_services)\n return 
placeholder_url.format(width=width_, height=height_)\n", "path": "faker/providers/internet/__init__.py"}]} | 2,844 | 176 |
gh_patches_debug_16733 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: adur_worthing_gov_uk has changed format of address
### I Have A Problem With:
A specific source
### What's Your Problem
The Adur and Worthing council used to return my address as “12 Roadname”, so that’s what I had in my source args. But the format has recently changed to “12 ROADNAME”, causing the lookup in adur_worthing_gov_uk.py to fail.
As the council is just as likely to change it back at some point, can I suggest that the lookup be made case-independent?
### Source (if relevant)
adur_worthing_gov_uk
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
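Since the council site may flip between "12 Roadname" and "12 ROADNAME", one way to make the address lookup case-independent is to normalise both sides of the comparison. A minimal sketch of the matching loop from `fetch()` in the source below, with both strings upper-cased before the containment check (assuming the same BeautifulSoup objects as in the source):

```python
# Case-independent match: normalise both the configured address and the text
# of each <option> from the council's address dropdown before comparing.
found_address = None
for address in addresses_select.find_all("option"):
    if self._address.upper() in address.get_text().upper():
        found_address = address
```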
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py]
1 from datetime import datetime
2
3 import bs4
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Adur & Worthing Councils"
8 DESCRIPTION = "Source for adur-worthing.gov.uk services for Adur & Worthing, UK."
9 URL = "https://adur-worthing.gov.uk"
10 TEST_CASES = {
11 "Test_001": {"postcode": "BN15 9UX", "address": "1 Western Road North"},
12 "Test_002": {"postcode": "BN43 5WE", "address": "6 Hebe Road"},
13 }
14 HEADERS = {
15 "user-agent": "Mozilla/5.0",
16 }
17 ICON_MAP = {
18 "Recycling": "mdi:recycle",
19 "Refuse": "mdi:trash-can",
20 "Garden": "mdi:leaf",
21 }
22
23
24 class Source:
25 def __init__(self, postcode, address):
26 self._postcode = postcode
27 self._address = address
28
29 def fetch(self):
30
31 if self._postcode is None or self._address is None:
32 raise ValueError("Either postcode or address is None")
33
34 s = requests.Session()
35
36 postcode_search_request = s.get(
37 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search",
38 headers=HEADERS,
39 )
40 html_addresses = postcode_search_request.content
41 addresses = bs4.BeautifulSoup(html_addresses, "html.parser")
42 addresses_select = addresses.find("select", {"id": "brlu-selected-address"})
43
44 found_address = None
45 for address in addresses_select.find_all("option"):
46 if self._address in address.get_text():
47 found_address = address
48
49 if found_address is None:
50 raise ValueError("Address not found")
51
52 collections_request = s.get(
53 f"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/",
54 headers=HEADERS,
55 )
56 html_collections = collections_request.content
57 bin_collections = bs4.BeautifulSoup(html_collections, "html.parser")
58
59 bin_days_table = bin_collections.find("table", class_="bin-days")
60 bin_days_table_body = bin_days_table.find("tbody")
61 bin_days_by_type = bin_days_table_body.find_all("tr")
62
63 entries = []
64
65 for bin_by_type in bin_days_by_type:
66 bin_type = bin_by_type.find("th").text
67 icon = ICON_MAP.get(bin_type)
68 bin_days = bin_by_type.find_all("td")[-1].get_text(separator="\n")
69 for bin_day in bin_days.split("\n"):
70 bin_datetime = datetime.strptime(bin_day, "%A %d %b %Y").date()
71 entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))
72
73 return entries
74
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py
@@ -27,7 +27,6 @@
self._address = address
def fetch(self):
-
if self._postcode is None or self._address is None:
raise ValueError("Either postcode or address is None")
@@ -43,7 +42,7 @@
found_address = None
for address in addresses_select.find_all("option"):
- if self._address in address.get_text():
+ if self._address.upper() in address.get_text().upper():
found_address = address
if found_address is None:
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py\n@@ -27,7 +27,6 @@\n self._address = address\n \n def fetch(self):\n-\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n \n@@ -43,7 +42,7 @@\n \n found_address = None\n for address in addresses_select.find_all(\"option\"):\n- if self._address in address.get_text():\n+ if self._address.upper() in address.get_text().upper():\n found_address = address\n \n if found_address is None:\n", "issue": "[Bug]: adur_worthing_gov_uk has changed format of address\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe Adur and Worthing council used to return my address as \u201c12 Roadname\u201d, so that\u2019s what I had in my source args. But the format has recently changed to \u201c12 ROADNAME\u201d, causing the lookup in adur_worthing_gov_uk.py to fail. \r\n\r\nAs the council is just as likely to change it back at some point can I suggest that the lookup is made case independent?\n\n### Source (if relevant)\n\nadur_worthing_gov_uk\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport bs4\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Adur & Worthing Councils\"\nDESCRIPTION = \"Source for adur-worthing.gov.uk services for Adur & Worthing, UK.\"\nURL = \"https://adur-worthing.gov.uk\"\nTEST_CASES = {\n \"Test_001\": {\"postcode\": \"BN15 9UX\", \"address\": \"1 Western Road North\"},\n \"Test_002\": {\"postcode\": \"BN43 5WE\", \"address\": \"6 Hebe Road\"},\n}\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\nICON_MAP = {\n \"Recycling\": \"mdi:recycle\",\n \"Refuse\": \"mdi:trash-can\",\n \"Garden\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, postcode, address):\n self._postcode = postcode\n self._address = address\n\n def fetch(self):\n\n if self._postcode is None or self._address is None:\n raise ValueError(\"Either postcode or address is None\")\n\n s = requests.Session()\n\n postcode_search_request = s.get(\n 
f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-address-postcode={self._postcode}&return-url=/bin-day/&action=search\",\n headers=HEADERS,\n )\n html_addresses = postcode_search_request.content\n addresses = bs4.BeautifulSoup(html_addresses, \"html.parser\")\n addresses_select = addresses.find(\"select\", {\"id\": \"brlu-selected-address\"})\n\n found_address = None\n for address in addresses_select.find_all(\"option\"):\n if self._address in address.get_text():\n found_address = address\n\n if found_address is None:\n raise ValueError(\"Address not found\")\n\n collections_request = s.get(\n f\"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address={address['value']}&return-url=/bin-day/\",\n headers=HEADERS,\n )\n html_collections = collections_request.content\n bin_collections = bs4.BeautifulSoup(html_collections, \"html.parser\")\n\n bin_days_table = bin_collections.find(\"table\", class_=\"bin-days\")\n bin_days_table_body = bin_days_table.find(\"tbody\")\n bin_days_by_type = bin_days_table_body.find_all(\"tr\")\n\n entries = []\n\n for bin_by_type in bin_days_by_type:\n bin_type = bin_by_type.find(\"th\").text\n icon = ICON_MAP.get(bin_type)\n bin_days = bin_by_type.find_all(\"td\")[-1].get_text(separator=\"\\n\")\n for bin_day in bin_days.split(\"\\n\"):\n bin_datetime = datetime.strptime(bin_day, \"%A %d %b %Y\").date()\n entries.append(Collection(t=bin_type, date=bin_datetime, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/adur_worthing_gov_uk.py"}]} | 1,697 | 216 |
gh_patches_debug_30705 | rasdani/github-patches | git_diff | getredash__redash-3506 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No invitation link after adding a new user when email server is not configured
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
After #3267 and #3229, admins no longer receive a link to invite users when the server doesn't have an email server configured.
### Steps to Reproduce
1. Open `/users/new` in a server without email configured (e.g.: [preview](https://redash-preview.netlify.com/users/new))
2. Create a new user and check that no link is provided.
3. Open the pending invitation User Profile and notice there's only the "Resend" option (again it leads to nowhere)
Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead?
The admin should receive a link to send to the user manually.
### Technical details:
* Redash Version: Latest (Redash Preview)
* Browser/OS: --
* How did you install Redash: --
</issue>
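The regression is that the invite link is only ever handed to the mailer, so when no mail server is configured it is silently lost. One way to restore the old behaviour is to return the link to the caller (the admin's API response) whenever e-mail is not configured. A minimal sketch, assuming `settings.MAIL_DEFAULT_SENDER` is the toggle for a configured mail server and that `redash.settings` is importable in the handler:

```python
def invite_user(org, inviter, user, send_email=True):
    # Sketch only: expose the invite link in the response when Redash has no
    # mail server configured, instead of dropping it after a no-op email send.
    email_configured = settings.MAIL_DEFAULT_SENDER is not None
    d = user.to_dict()

    invite_url = invite_link_for_user(user)
    if email_configured and send_email:
        send_invite_email(inviter, user, invite_url, org)
    else:
        d['invite_link'] = invite_url

    return d
```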
<code>
[start of redash/handlers/users.py]
1 import re
2 import time
3 from flask import request
4 from flask_restful import abort
5 from flask_login import current_user, login_user
6 from funcy import project
7 from sqlalchemy.exc import IntegrityError
8 from disposable_email_domains import blacklist
9 from funcy import partial
10
11 from redash import models
12 from redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \
13 require_permission_or_owner, require_admin
14 from redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results
15
16 from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email
17 from redash.settings import parse_boolean
18
19
20 # Ordering map for relationships
21 order_map = {
22 'name': 'name',
23 '-name': '-name',
24 'active_at': 'active_at',
25 '-active_at': '-active_at',
26 'created_at': 'created_at',
27 '-created_at': '-created_at',
28 'groups': 'group_ids',
29 '-groups': '-group_ids',
30 }
31
32 order_results = partial(
33 _order_results,
34 default_order='-created_at',
35 allowed_orders=order_map,
36 )
37
38
39 def invite_user(org, inviter, user):
40 invite_url = invite_link_for_user(user)
41 send_invite_email(inviter, user, invite_url, org)
42
43
44 class UserListResource(BaseResource):
45 def get_users(self, disabled, pending, search_term):
46 if disabled:
47 users = models.User.all_disabled(self.current_org)
48 else:
49 users = models.User.all(self.current_org)
50
51 if pending is not None:
52 users = models.User.pending(users, pending)
53
54 if search_term:
55 users = models.User.search(users, search_term)
56 self.record_event({
57 'action': 'search',
58 'object_type': 'user',
59 'term': search_term,
60 'pending': pending,
61 })
62 else:
63 self.record_event({
64 'action': 'list',
65 'object_type': 'user',
66 'pending': pending,
67 })
68
69 # order results according to passed order parameter,
70 # special-casing search queries where the database
71 # provides an order by search rank
72 return order_results(users, fallback=bool(search_term))
73
74 @require_permission('list_users')
75 def get(self):
76 page = request.args.get('page', 1, type=int)
77 page_size = request.args.get('page_size', 25, type=int)
78
79 groups = {group.id: group for group in models.Group.all(self.current_org)}
80
81 def serialize_user(user):
82 d = user.to_dict()
83 user_groups = []
84 for group_id in set(d['groups']):
85 group = groups.get(group_id)
86
87 if group:
88 user_groups.append({'id': group.id, 'name': group.name})
89
90 d['groups'] = user_groups
91
92 return d
93
94 search_term = request.args.get('q', '')
95
96 disabled = request.args.get('disabled', 'false') # get enabled users by default
97 disabled = parse_boolean(disabled)
98
99 pending = request.args.get('pending', None) # get both active and pending by default
100 if pending is not None:
101 pending = parse_boolean(pending)
102
103 users = self.get_users(disabled, pending, search_term)
104
105 return paginate(users, page, page_size, serialize_user)
106
107 @require_admin
108 def post(self):
109 req = request.get_json(force=True)
110 require_fields(req, ('name', 'email'))
111
112 if '@' not in req['email']:
113 abort(400, message='Bad email address.')
114 name, domain = req['email'].split('@', 1)
115
116 if domain.lower() in blacklist or domain.lower() == 'qq.com':
117 abort(400, message='Bad email address.')
118
119 user = models.User(org=self.current_org,
120 name=req['name'],
121 email=req['email'],
122 is_invitation_pending=True,
123 group_ids=[self.current_org.default_group.id])
124
125 try:
126 models.db.session.add(user)
127 models.db.session.commit()
128 except IntegrityError as e:
129 if "email" in e.message:
130 abort(400, message='Email already taken.')
131 abort(500)
132
133 self.record_event({
134 'action': 'create',
135 'object_id': user.id,
136 'object_type': 'user'
137 })
138
139 should_send_invitation = 'no_invite' not in request.args
140 if should_send_invitation:
141 invite_user(self.current_org, self.current_user, user)
142
143 return user.to_dict()
144
145
146 class UserInviteResource(BaseResource):
147 @require_admin
148 def post(self, user_id):
149 user = models.User.get_by_id_and_org(user_id, self.current_org)
150 invite_url = invite_user(self.current_org, self.current_user, user)
151
152 return user.to_dict()
153
154
155 class UserResetPasswordResource(BaseResource):
156 @require_admin
157 def post(self, user_id):
158 user = models.User.get_by_id_and_org(user_id, self.current_org)
159 if user.is_disabled:
160 abort(404, message='Not found')
161 reset_link = send_password_reset_email(user)
162
163 return {
164 'reset_link': reset_link,
165 }
166
167
168 class UserRegenerateApiKeyResource(BaseResource):
169 def post(self, user_id):
170 user = models.User.get_by_id_and_org(user_id, self.current_org)
171 if user.is_disabled:
172 abort(404, message='Not found')
173 if not is_admin_or_owner(user_id):
174 abort(403)
175
176 user.regenerate_api_key()
177 models.db.session.commit()
178
179 self.record_event({
180 'action': 'regnerate_api_key',
181 'object_id': user.id,
182 'object_type': 'user'
183 })
184
185 return user.to_dict(with_api_key=True)
186
187
188 class UserResource(BaseResource):
189 def get(self, user_id):
190 require_permission_or_owner('list_users', user_id)
191 user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org)
192
193 self.record_event({
194 'action': 'view',
195 'object_id': user_id,
196 'object_type': 'user',
197 })
198
199 return user.to_dict(with_api_key=is_admin_or_owner(user_id))
200
201 def post(self, user_id):
202 require_admin_or_owner(user_id)
203 user = models.User.get_by_id_and_org(user_id, self.current_org)
204
205 req = request.get_json(True)
206
207 params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))
208
209 if 'password' in params and 'old_password' not in params:
210 abort(403, message="Must provide current password to update password.")
211
212 if 'old_password' in params and not user.verify_password(params['old_password']):
213 abort(403, message="Incorrect current password.")
214
215 if 'password' in params:
216 user.hash_password(params.pop('password'))
217 params.pop('old_password')
218
219 if 'groups' in params and not self.current_user.has_permission('admin'):
220 abort(403, message="Must be admin to change groups membership.")
221
222 if 'email' in params:
223 _, domain = params['email'].split('@', 1)
224
225 if domain.lower() in blacklist or domain.lower() == 'qq.com':
226 abort(400, message='Bad email address.')
227
228 email_changed = 'email' in params and params['email'] != user.email
229 if email_changed:
230 user.is_email_verified = False
231
232 try:
233 self.update_model(user, params)
234 models.db.session.commit()
235
236 if email_changed:
237 send_verify_email(user, self.current_org)
238
239 # The user has updated their email or password. This should invalidate all _other_ sessions,
240 # forcing them to log in again. Since we don't want to force _this_ session to have to go
241 # through login again, we call `login_user` in order to update the session with the new identity details.
242 if current_user.id == user.id:
243 login_user(user, remember=True)
244 except IntegrityError as e:
245 if "email" in e.message:
246 message = "Email already taken."
247 else:
248 message = "Error updating record"
249
250 abort(400, message=message)
251
252 self.record_event({
253 'action': 'edit',
254 'object_id': user.id,
255 'object_type': 'user',
256 'updated_fields': params.keys()
257 })
258
259 return user.to_dict(with_api_key=is_admin_or_owner(user_id))
260
261 @require_admin
262 def delete(self, user_id):
263 user = models.User.get_by_id_and_org(user_id, self.current_org)
264 # admin cannot delete self; current user is an admin (`@require_admin`)
265 # so just check user id
266 if user.id == current_user.id:
267 abort(403, message="You cannot delete your own account. "
268 "Please ask another admin to do this for you.")
269 elif not user.is_invitation_pending:
270 abort(403, message="You cannot delete activated users. "
271 "Please disable the user instead.")
272 models.db.session.delete(user)
273 models.db.session.commit()
274
275 return user.to_dict(with_api_key=is_admin_or_owner(user_id))
276
277
278 class UserDisableResource(BaseResource):
279 @require_admin
280 def post(self, user_id):
281 user = models.User.get_by_id_and_org(user_id, self.current_org)
282 # admin cannot disable self; current user is an admin (`@require_admin`)
283 # so just check user id
284 if user.id == current_user.id:
285 abort(403, message="You cannot disable your own account. "
286 "Please ask another admin to do this for you.")
287 user.disable()
288 models.db.session.commit()
289
290 return user.to_dict(with_api_key=is_admin_or_owner(user_id))
291
292 @require_admin
293 def delete(self, user_id):
294 user = models.User.get_by_id_and_org(user_id, self.current_org)
295 user.enable()
296 models.db.session.commit()
297
298 return user.to_dict(with_api_key=is_admin_or_owner(user_id))
299
[end of redash/handlers/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/users.py b/redash/handlers/users.py
--- a/redash/handlers/users.py
+++ b/redash/handlers/users.py
@@ -15,6 +15,7 @@
from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email
from redash.settings import parse_boolean
+from redash import settings
# Ordering map for relationships
@@ -36,9 +37,17 @@
)
-def invite_user(org, inviter, user):
+def invite_user(org, inviter, user, send_email=True):
+ email_configured = settings.MAIL_DEFAULT_SENDER is not None
+ d = user.to_dict()
+
invite_url = invite_link_for_user(user)
- send_invite_email(inviter, user, invite_url, org)
+ if email_configured and send_email:
+ send_invite_email(inviter, user, invite_url, org)
+ else:
+ d['invite_link'] = invite_url
+
+ return d
class UserListResource(BaseResource):
@@ -137,19 +146,14 @@
})
should_send_invitation = 'no_invite' not in request.args
- if should_send_invitation:
- invite_user(self.current_org, self.current_user, user)
-
- return user.to_dict()
+ return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation)
class UserInviteResource(BaseResource):
@require_admin
def post(self, user_id):
user = models.User.get_by_id_and_org(user_id, self.current_org)
- invite_url = invite_user(self.current_org, self.current_user, user)
-
- return user.to_dict()
+ return invite_user(self.current_org, self.current_user, user)
class UserResetPasswordResource(BaseResource):
| {"golden_diff": "diff --git a/redash/handlers/users.py b/redash/handlers/users.py\n--- a/redash/handlers/users.py\n+++ b/redash/handlers/users.py\n@@ -15,6 +15,7 @@\n \n from redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email\n from redash.settings import parse_boolean\n+from redash import settings\n \n \n # Ordering map for relationships\n@@ -36,9 +37,17 @@\n )\n \n \n-def invite_user(org, inviter, user):\n+def invite_user(org, inviter, user, send_email=True):\n+ email_configured = settings.MAIL_DEFAULT_SENDER is not None\n+ d = user.to_dict()\n+\n invite_url = invite_link_for_user(user)\n- send_invite_email(inviter, user, invite_url, org)\n+ if email_configured and send_email:\n+ send_invite_email(inviter, user, invite_url, org)\n+ else:\n+ d['invite_link'] = invite_url\n+\n+ return d\n \n \n class UserListResource(BaseResource):\n@@ -137,19 +146,14 @@\n })\n \n should_send_invitation = 'no_invite' not in request.args\n- if should_send_invitation:\n- invite_user(self.current_org, self.current_user, user)\n-\n- return user.to_dict()\n+ return invite_user(self.current_org, self.current_user, user, send_email=should_send_invitation)\n \n \n class UserInviteResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n- invite_url = invite_user(self.current_org, self.current_user, user)\n-\n- return user.to_dict()\n+ return invite_user(self.current_org, self.current_user, user)\n \n \n class UserResetPasswordResource(BaseResource):\n", "issue": "No invitation link after adding a new user when email server is not configured\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nAfter #3267 and #3229 admins no longer receive a link to invite users when the server doesn't have an email server configured.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Open `/users/new` in a server without email configured (e.g.: [preview](https://redash-preview.netlify.com/users/new))\r\n2. Create a new user and check that no link is provided.\r\n3. Open the pending invitation User Profile and notice there's only the \"Resend\" option (again it leads to nowhere)\r\n\r\nAny other info e.g. Why do you consider this to be a bug? 
What did you expect to happen instead?\r\nThe admin should receive a link to send to the user manually.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: Latest (Redash Preview)\r\n* Browser/OS: --\r\n* How did you install Redash: --\r\n\n", "before_files": [{"content": "import re\nimport time\nfrom flask import request\nfrom flask_restful import abort\nfrom flask_login import current_user, login_user\nfrom funcy import project\nfrom sqlalchemy.exc import IntegrityError\nfrom disposable_email_domains import blacklist\nfrom funcy import partial\n\nfrom redash import models\nfrom redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \\\n require_permission_or_owner, require_admin\nfrom redash.handlers.base import BaseResource, require_fields, get_object_or_404, paginate, order_results as _order_results\n\nfrom redash.authentication.account import invite_link_for_user, send_invite_email, send_password_reset_email, send_verify_email\nfrom redash.settings import parse_boolean\n\n\n# Ordering map for relationships\norder_map = {\n 'name': 'name',\n '-name': '-name',\n 'active_at': 'active_at',\n '-active_at': '-active_at',\n 'created_at': 'created_at',\n '-created_at': '-created_at',\n 'groups': 'group_ids',\n '-groups': '-group_ids',\n}\n\norder_results = partial(\n _order_results,\n default_order='-created_at',\n allowed_orders=order_map,\n)\n\n\ndef invite_user(org, inviter, user):\n invite_url = invite_link_for_user(user)\n send_invite_email(inviter, user, invite_url, org)\n\n\nclass UserListResource(BaseResource):\n def get_users(self, disabled, pending, search_term):\n if disabled:\n users = models.User.all_disabled(self.current_org)\n else:\n users = models.User.all(self.current_org)\n\n if pending is not None:\n users = models.User.pending(users, pending)\n\n if search_term:\n users = models.User.search(users, search_term)\n self.record_event({\n 'action': 'search',\n 'object_type': 'user',\n 'term': search_term,\n 'pending': pending,\n })\n else:\n self.record_event({\n 'action': 'list',\n 'object_type': 'user',\n 'pending': pending,\n })\n\n # order results according to passed order parameter,\n # special-casing search queries where the database\n # provides an order by search rank\n return order_results(users, fallback=bool(search_term))\n\n @require_permission('list_users')\n def get(self):\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n\n groups = {group.id: group for group in models.Group.all(self.current_org)}\n\n def serialize_user(user):\n d = user.to_dict()\n user_groups = []\n for group_id in set(d['groups']):\n group = groups.get(group_id)\n\n if group:\n user_groups.append({'id': group.id, 'name': group.name})\n\n d['groups'] = user_groups\n\n return d\n\n search_term = request.args.get('q', '')\n\n disabled = request.args.get('disabled', 'false') # get enabled users by default\n disabled = parse_boolean(disabled)\n\n pending = request.args.get('pending', None) # get both active and pending by default\n if pending is not None:\n pending = parse_boolean(pending)\n\n users = self.get_users(disabled, pending, search_term)\n\n return paginate(users, page, page_size, serialize_user)\n\n @require_admin\n def post(self):\n req = request.get_json(force=True)\n require_fields(req, ('name', 'email'))\n\n if '@' not in req['email']:\n abort(400, message='Bad email address.')\n name, domain = req['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n 
abort(400, message='Bad email address.')\n\n user = models.User(org=self.current_org,\n name=req['name'],\n email=req['email'],\n is_invitation_pending=True,\n group_ids=[self.current_org.default_group.id])\n\n try:\n models.db.session.add(user)\n models.db.session.commit()\n except IntegrityError as e:\n if \"email\" in e.message:\n abort(400, message='Email already taken.')\n abort(500)\n\n self.record_event({\n 'action': 'create',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n should_send_invitation = 'no_invite' not in request.args\n if should_send_invitation:\n invite_user(self.current_org, self.current_user, user)\n\n return user.to_dict()\n\n\nclass UserInviteResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n invite_url = invite_user(self.current_org, self.current_user, user)\n\n return user.to_dict()\n\n\nclass UserResetPasswordResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n reset_link = send_password_reset_email(user)\n\n return {\n 'reset_link': reset_link,\n }\n\n\nclass UserRegenerateApiKeyResource(BaseResource):\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n if user.is_disabled:\n abort(404, message='Not found')\n if not is_admin_or_owner(user_id):\n abort(403)\n\n user.regenerate_api_key()\n models.db.session.commit()\n\n self.record_event({\n 'action': 'regnerate_api_key',\n 'object_id': user.id,\n 'object_type': 'user'\n })\n\n return user.to_dict(with_api_key=True)\n\n\nclass UserResource(BaseResource):\n def get(self, user_id):\n require_permission_or_owner('list_users', user_id)\n user = get_object_or_404(models.User.get_by_id_and_org, user_id, self.current_org)\n\n self.record_event({\n 'action': 'view',\n 'object_id': user_id,\n 'object_type': 'user',\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n def post(self, user_id):\n require_admin_or_owner(user_id)\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n\n req = request.get_json(True)\n\n params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))\n\n if 'password' in params and 'old_password' not in params:\n abort(403, message=\"Must provide current password to update password.\")\n\n if 'old_password' in params and not user.verify_password(params['old_password']):\n abort(403, message=\"Incorrect current password.\")\n\n if 'password' in params:\n user.hash_password(params.pop('password'))\n params.pop('old_password')\n\n if 'groups' in params and not self.current_user.has_permission('admin'):\n abort(403, message=\"Must be admin to change groups membership.\")\n\n if 'email' in params:\n _, domain = params['email'].split('@', 1)\n\n if domain.lower() in blacklist or domain.lower() == 'qq.com':\n abort(400, message='Bad email address.')\n\n email_changed = 'email' in params and params['email'] != user.email\n if email_changed:\n user.is_email_verified = False\n\n try:\n self.update_model(user, params)\n models.db.session.commit()\n\n if email_changed:\n send_verify_email(user, self.current_org)\n\n # The user has updated their email or password. This should invalidate all _other_ sessions,\n # forcing them to log in again. 
Since we don't want to force _this_ session to have to go\n # through login again, we call `login_user` in order to update the session with the new identity details.\n if current_user.id == user.id:\n login_user(user, remember=True)\n except IntegrityError as e:\n if \"email\" in e.message:\n message = \"Email already taken.\"\n else:\n message = \"Error updating record\"\n\n abort(400, message=message)\n\n self.record_event({\n 'action': 'edit',\n 'object_id': user.id,\n 'object_type': 'user',\n 'updated_fields': params.keys()\n })\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot delete self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot delete your own account. \"\n \"Please ask another admin to do this for you.\")\n elif not user.is_invitation_pending:\n abort(403, message=\"You cannot delete activated users. \"\n \"Please disable the user instead.\")\n models.db.session.delete(user)\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n\nclass UserDisableResource(BaseResource):\n @require_admin\n def post(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n # admin cannot disable self; current user is an admin (`@require_admin`)\n # so just check user id\n if user.id == current_user.id:\n abort(403, message=\"You cannot disable your own account. \"\n \"Please ask another admin to do this for you.\")\n user.disable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n\n @require_admin\n def delete(self, user_id):\n user = models.User.get_by_id_and_org(user_id, self.current_org)\n user.enable()\n models.db.session.commit()\n\n return user.to_dict(with_api_key=is_admin_or_owner(user_id))\n", "path": "redash/handlers/users.py"}]} | 3,882 | 417 |
gh_patches_debug_44579 | rasdani/github-patches | git_diff | great-expectations__great_expectations-858 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
great_expectations.yml has broken links
Working off the latest version:
```
→ great_expectations --version
great_expectations, version 0.8.4.post0
```
YAML file generated from `great_expectations init` has the following contents:
```yaml
# Welcome to Great Expectations! Always know what to expect from your data.
#
# Here you can define datasources, generators, integrations and more. This file
# is intended to be committed to your repo. For help with configuration please:
# - Read our docs: https://docs.greatexpectations.io/en/0_8_4_post0/reference/data_context_reference.html#configuration
# - Join our slack channel: http://greatexpectations.io/slack
#
# NOTE: GE uses the names of configured `datasources` and `generators` to manage
# how `expectations` and other artifacts are stored in the `expectations/` and
# `datasources/` folders. If you need to rename an existing `datasource` or
# `generator`, be sure to also update the relevant directory names.
config_version: 1
# Datasources tell Great Expectations where your data lives and how to get it.
# You can use the CLI command `great_expectations add-datasource` to help you
# add a new datasource. Read more at https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html
datasources:
test:
class_name: PandasDatasource
data_asset_type:
class_name: PandasDataset
generators:
default:
class_name: SubdirReaderGenerator
base_directory: ../misc
reader_options:
sep:
engine: python
```
All the docs linked throughout the yaml file, i.e. https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html, do not exist.
Missing link/reference in docs?
https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#validation-operators

Assuming the underlined part should have a hyperlink?
</issue>
<code>
[start of great_expectations/data_context/templates.py]
1 # -*- coding: utf-8 -*-
2 from great_expectations import rtd_url_ge_version
3
4 PROJECT_HELP_COMMENT = """
5 # Welcome to Great Expectations! Always know what to expect from your data.
6 #
7 # Here you can define datasources, generators, integrations and more. This file
8 # is intended to be committed to your repo. For help with configuration please:
9 # - Read our docs: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#configuration
10 # - Join our slack channel: http://greatexpectations.io/slack
11 #
12 # NOTE: GE uses the names of configured `datasources` and `generators` to manage
13 # how `expectations` and other artifacts are stored in the `expectations/` and
14 # `datasources/` folders. If you need to rename an existing `datasource` or
15 # `generator`, be sure to also update the relevant directory names.
16
17 config_version: 1
18
19 # Datasources tell Great Expectations where your data lives and how to get it.
20 # You can use the CLI command `great_expectations add-datasource` to help you
21 # add a new datasource. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/datasource.html
22 datasources: {}
23 """
24
25 CONFIG_VARIABLES_INTRO = """
26 # This config file supports variable substitution which enables: 1) keeping
27 # secrets out of source control & 2) environment-based configuration changes
28 # such as staging vs prod.
29 #
30 # When GE encounters substitution syntax (like `my_key: ${my_value}` or
31 # `my_key: $my_value`) in the config file it will attempt to replace the value
32 # of `my_key` with the value from an environment variable `my_value` or a
33 # corresponding key read from the file specified using
34 # `config_variables_file_path`. Environment variables take precedence.
35 #
36 # If the substitution value comes from the config variables file, it can be a
37 # simple (non-nested) value or a nested value such as a dictionary. If it comes
38 # from an environment variable, it must be a simple value. Read more at:
39 # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#managing-environment-and-secrets"""
40
41 # This junky fake key keeps ruaml.yml from nuking the important comment block
42 CONFIG_VARIABLES_TEMPLATE = "ge_comment_preservation_key: 1" + CONFIG_VARIABLES_INTRO
43
44 PROJECT_OPTIONAL_CONFIG_COMMENT = CONFIG_VARIABLES_INTRO + """
45 config_variables_file_path: uncommitted/config_variables.yml
46
47 # The plugins_directory will be added to your python path for custom modules
48 # used to override and extend Great Expectations.
49 plugins_directory: plugins/
50
51 # Validation Operators are customizable workflows that bundle the validation of
52 # one or more expectation suites and subsequent actions. The example below
53 # stores validations and send a slack notification. To read more about
54 # customizing and extending these, read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/validation_operators_and_actions.html
55 validation_operators:
56 action_list_operator:
57 # To learn how to configure sending Slack notifications during evaluation
58 # (and other customizations), read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/validation_operators/action_list_validation_operator.html
59 class_name: ActionListValidationOperator
60 action_list:
61 - name: store_validation_result
62 action:
63 class_name: StoreAction
64 - name: store_evaluation_params
65 action:
66 class_name: ExtractAndStoreEvaluationParamsAction
67 - name: update_data_docs
68 action:
69 class_name: UpdateDataDocsAction
70 - name: send_slack_notification_on_validation_result
71 action:
72 class_name: SlackNotificationAction
73 # put the actual webhook URL in the uncommitted/config_variables.yml file
74 slack_webhook: ${validation_notification_slack_webhook}
75 notify_on: all # possible values: "all", "failure", "success"
76 renderer:
77 module_name: great_expectations.render.renderer.slack_renderer
78 class_name: SlackRenderer
79
80 stores:
81 # Stores are configurable places to store things like Expectations, Validations
82 # Data Docs, and more. These are for advanced users only - most users can simply
83 # leave this section alone.
84 #
85 # Three stores are required: expectations, validations, and
86 # evaluation_parameters, and must exist with a valid store entry. Additional
87 # stores can be configured for uses such as data_docs, validation_operators, etc.
88 expectations_store:
89 class_name: ExpectationsStore
90 store_backend:
91 class_name: FixedLengthTupleFilesystemStoreBackend
92 base_directory: expectations/
93
94 validations_store:
95 class_name: ValidationsStore
96 store_backend:
97 class_name: FixedLengthTupleFilesystemStoreBackend
98 base_directory: uncommitted/validations/
99
100 evaluation_parameter_store:
101 # Evaluation Parameters enable dynamic expectations. Read more here:
102 # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/evaluation_parameters.html
103 class_name: InMemoryEvaluationParameterStore
104
105 expectations_store_name: expectations_store
106 validations_store_name: validations_store
107 evaluation_parameter_store_name: evaluation_parameter_store
108
109 data_docs_sites:
110 # Data Docs make it simple to visualize data quality in your project. These
111 # include Expectations, Validations & Profiles. The are built for all
112 # Datasources from JSON artifacts in the local repo including validations &
113 # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/data_docs.html
114 local_site:
115 class_name: SiteBuilder
116 store_backend:
117 class_name: FixedLengthTupleFilesystemStoreBackend
118 base_directory: uncommitted/data_docs/local_site/
119 site_index_builder:
120 show_cta_footer: True
121 """
122
123 PROJECT_TEMPLATE = PROJECT_HELP_COMMENT + PROJECT_OPTIONAL_CONFIG_COMMENT
124
[end of great_expectations/data_context/templates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/data_context/templates.py b/great_expectations/data_context/templates.py
--- a/great_expectations/data_context/templates.py
+++ b/great_expectations/data_context/templates.py
@@ -6,7 +6,7 @@
#
# Here you can define datasources, generators, integrations and more. This file
# is intended to be committed to your repo. For help with configuration please:
-# - Read our docs: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#configuration
+# - Read our docs: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#configuration
# - Join our slack channel: http://greatexpectations.io/slack
#
# NOTE: GE uses the names of configured `datasources` and `generators` to manage
@@ -18,7 +18,7 @@
# Datasources tell Great Expectations where your data lives and how to get it.
# You can use the CLI command `great_expectations add-datasource` to help you
-# add a new datasource. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/datasource.html
+# add a new datasource. Read more at https://docs.greatexpectations.io/en/latest/features/datasource.html
datasources: {}
"""
@@ -36,7 +36,7 @@
# If the substitution value comes from the config variables file, it can be a
# simple (non-nested) value or a nested value such as a dictionary. If it comes
# from an environment variable, it must be a simple value. Read more at:
-# https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/data_context_reference.html#managing-environment-and-secrets"""
+# https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets"""
# This junky fake key keeps ruaml.yml from nuking the important comment block
CONFIG_VARIABLES_TEMPLATE = "ge_comment_preservation_key: 1" + CONFIG_VARIABLES_INTRO
@@ -51,11 +51,11 @@
# Validation Operators are customizable workflows that bundle the validation of
# one or more expectation suites and subsequent actions. The example below
# stores validations and send a slack notification. To read more about
-# customizing and extending these, read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/validation_operators_and_actions.html
+# customizing and extending these, read: https://docs.greatexpectations.io/en/latest/features/validation_operators_and_actions.html
validation_operators:
action_list_operator:
# To learn how to configure sending Slack notifications during evaluation
- # (and other customizations), read: https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/validation_operators/action_list_validation_operator.html
+ # (and other customizations), read: https://docs.greatexpectations.io/en/latest/reference/validation_operators/action_list_validation_operator.html
class_name: ActionListValidationOperator
action_list:
- name: store_validation_result
@@ -99,7 +99,7 @@
evaluation_parameter_store:
# Evaluation Parameters enable dynamic expectations. Read more here:
- # https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/reference/evaluation_parameters.html
+ # https://docs.greatexpectations.io/en/latest/reference/evaluation_parameters.html
class_name: InMemoryEvaluationParameterStore
expectations_store_name: expectations_store
@@ -110,7 +110,7 @@
# Data Docs make it simple to visualize data quality in your project. These
# include Expectations, Validations & Profiles. The are built for all
# Datasources from JSON artifacts in the local repo including validations &
- # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/""" + rtd_url_ge_version + """/features/data_docs.html
+ # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/features/data_docs.html
local_site:
class_name: SiteBuilder
store_backend:
| {"golden_diff": "diff --git a/great_expectations/data_context/templates.py b/great_expectations/data_context/templates.py\n--- a/great_expectations/data_context/templates.py\n+++ b/great_expectations/data_context/templates.py\n@@ -6,7 +6,7 @@\n # \n # Here you can define datasources, generators, integrations and more. This file\n # is intended to be committed to your repo. For help with configuration please:\n-# - Read our docs: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#configuration\n+# - Read our docs: https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#configuration\n # - Join our slack channel: http://greatexpectations.io/slack\n #\n # NOTE: GE uses the names of configured `datasources` and `generators` to manage\n@@ -18,7 +18,7 @@\n \n # Datasources tell Great Expectations where your data lives and how to get it.\n # You can use the CLI command `great_expectations add-datasource` to help you\n-# add a new datasource. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/datasource.html\n+# add a new datasource. Read more at https://docs.greatexpectations.io/en/latest/features/datasource.html\n datasources: {}\n \"\"\"\n \n@@ -36,7 +36,7 @@\n # If the substitution value comes from the config variables file, it can be a\n # simple (non-nested) value or a nested value such as a dictionary. If it comes\n # from an environment variable, it must be a simple value. Read more at:\n-# https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n+# https://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n \n # This junky fake key keeps ruaml.yml from nuking the important comment block\n CONFIG_VARIABLES_TEMPLATE = \"ge_comment_preservation_key: 1\" + CONFIG_VARIABLES_INTRO\n@@ -51,11 +51,11 @@\n # Validation Operators are customizable workflows that bundle the validation of\n # one or more expectation suites and subsequent actions. The example below\n # stores validations and send a slack notification. To read more about\n-# customizing and extending these, read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/validation_operators_and_actions.html\n+# customizing and extending these, read: https://docs.greatexpectations.io/en/latest/features/validation_operators_and_actions.html\n validation_operators:\n action_list_operator:\n # To learn how to configure sending Slack notifications during evaluation\n- # (and other customizations), read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/validation_operators/action_list_validation_operator.html\n+ # (and other customizations), read: https://docs.greatexpectations.io/en/latest/reference/validation_operators/action_list_validation_operator.html\n class_name: ActionListValidationOperator\n action_list:\n - name: store_validation_result\n@@ -99,7 +99,7 @@\n \n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. 
Read more here:\n- # https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/evaluation_parameters.html\n+ # https://docs.greatexpectations.io/en/latest/reference/evaluation_parameters.html\n class_name: InMemoryEvaluationParameterStore\n \n expectations_store_name: expectations_store\n@@ -110,7 +110,7 @@\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n- # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/data_docs.html\n+ # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/latest/features/data_docs.html\n local_site:\n class_name: SiteBuilder\n store_backend:\n", "issue": "great_expectations.yml has broken links\nWorking off the latest version:\r\n\r\n```\r\n\u2192 great_expectations --version\r\n\r\ngreat_expectations, version 0.8.4.post0\r\n```\r\n\r\n\r\nYAML file generated from `great_expectations init` has the following contents:\r\n\r\n```yaml\r\n\r\n# Welcome to Great Expectations! Always know what to expect from your data.\r\n# \r\n# Here you can define datasources, generators, integrations and more. This file\r\n# is intended to be committed to your repo. For help with configuration please:\r\n# - Read our docs: https://docs.greatexpectations.io/en/0_8_4_post0/reference/data_context_reference.html#configuration\r\n# - Join our slack channel: http://greatexpectations.io/slack\r\n#\r\n# NOTE: GE uses the names of configured `datasources` and `generators` to manage\r\n# how `expectations` and other artifacts are stored in the `expectations/` and \r\n# `datasources/` folders. If you need to rename an existing `datasource` or \r\n# `generator`, be sure to also update the relevant directory names.\r\n\r\nconfig_version: 1\r\n\r\n# Datasources tell Great Expectations where your data lives and how to get it.\r\n# You can use the CLI command `great_expectations add-datasource` to help you\r\n# add a new datasource. Read more at https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html\r\ndatasources:\r\n test:\r\n class_name: PandasDatasource\r\n data_asset_type:\r\n class_name: PandasDataset\r\n generators:\r\n default:\r\n class_name: SubdirReaderGenerator\r\n base_directory: ../misc\r\n reader_options:\r\n sep:\r\n engine: python\r\n```\r\n\r\n\r\nAll the docs linked throughout the yaml file, i.e. https://docs.greatexpectations.io/en/0_8_4_post0/features/datasource.html, do not exist.\r\n\nMissing link/reference in docs?\nhttps://docs.greatexpectations.io/en/latest/reference/data_context_reference.html#validation-operators\r\n\r\n\r\n\r\nAssuming the underlined part should have a hyperlink?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom great_expectations import rtd_url_ge_version\n\nPROJECT_HELP_COMMENT = \"\"\"\n# Welcome to Great Expectations! Always know what to expect from your data.\n# \n# Here you can define datasources, generators, integrations and more. This file\n# is intended to be committed to your repo. 
For help with configuration please:\n# - Read our docs: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#configuration\n# - Join our slack channel: http://greatexpectations.io/slack\n#\n# NOTE: GE uses the names of configured `datasources` and `generators` to manage\n# how `expectations` and other artifacts are stored in the `expectations/` and \n# `datasources/` folders. If you need to rename an existing `datasource` or \n# `generator`, be sure to also update the relevant directory names.\n\nconfig_version: 1\n\n# Datasources tell Great Expectations where your data lives and how to get it.\n# You can use the CLI command `great_expectations add-datasource` to help you\n# add a new datasource. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/datasource.html\ndatasources: {}\n\"\"\"\n\nCONFIG_VARIABLES_INTRO = \"\"\"\n# This config file supports variable substitution which enables: 1) keeping\n# secrets out of source control & 2) environment-based configuration changes\n# such as staging vs prod.\n#\n# When GE encounters substitution syntax (like `my_key: ${my_value}` or \n# `my_key: $my_value`) in the config file it will attempt to replace the value\n# of `my_key` with the value from an environment variable `my_value` or a\n# corresponding key read from the file specified using\n# `config_variables_file_path`. Environment variables take precedence.\n#\n# If the substitution value comes from the config variables file, it can be a\n# simple (non-nested) value or a nested value such as a dictionary. If it comes\n# from an environment variable, it must be a simple value. Read more at:\n# https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/data_context_reference.html#managing-environment-and-secrets\"\"\"\n\n# This junky fake key keeps ruaml.yml from nuking the important comment block\nCONFIG_VARIABLES_TEMPLATE = \"ge_comment_preservation_key: 1\" + CONFIG_VARIABLES_INTRO\n\nPROJECT_OPTIONAL_CONFIG_COMMENT = CONFIG_VARIABLES_INTRO + \"\"\"\nconfig_variables_file_path: uncommitted/config_variables.yml\n\n# The plugins_directory will be added to your python path for custom modules\n# used to override and extend Great Expectations.\nplugins_directory: plugins/\n\n# Validation Operators are customizable workflows that bundle the validation of\n# one or more expectation suites and subsequent actions. The example below\n# stores validations and send a slack notification. 
To read more about\n# customizing and extending these, read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/validation_operators_and_actions.html\nvalidation_operators:\n action_list_operator:\n # To learn how to configure sending Slack notifications during evaluation\n # (and other customizations), read: https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/validation_operators/action_list_validation_operator.html\n class_name: ActionListValidationOperator\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreAction\n - name: store_evaluation_params\n action:\n class_name: ExtractAndStoreEvaluationParamsAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n - name: send_slack_notification_on_validation_result\n action:\n class_name: SlackNotificationAction\n # put the actual webhook URL in the uncommitted/config_variables.yml file\n slack_webhook: ${validation_notification_slack_webhook}\n notify_on: all # possible values: \"all\", \"failure\", \"success\"\n renderer:\n module_name: great_expectations.render.renderer.slack_renderer\n class_name: SlackRenderer\n \nstores:\n# Stores are configurable places to store things like Expectations, Validations\n# Data Docs, and more. These are for advanced users only - most users can simply\n# leave this section alone.\n# \n# Three stores are required: expectations, validations, and\n# evaluation_parameters, and must exist with a valid store entry. Additional\n# stores can be configured for uses such as data_docs, validation_operators, etc.\n expectations_store:\n class_name: ExpectationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: expectations/\n\n validations_store:\n class_name: ValidationsStore\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/validations/\n\n evaluation_parameter_store:\n # Evaluation Parameters enable dynamic expectations. Read more here:\n # https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/reference/evaluation_parameters.html\n class_name: InMemoryEvaluationParameterStore\n\nexpectations_store_name: expectations_store\nvalidations_store_name: validations_store\nevaluation_parameter_store_name: evaluation_parameter_store\n\ndata_docs_sites:\n # Data Docs make it simple to visualize data quality in your project. These\n # include Expectations, Validations & Profiles. The are built for all\n # Datasources from JSON artifacts in the local repo including validations &\n # profiles from the uncommitted directory. Read more at https://docs.greatexpectations.io/en/\"\"\" + rtd_url_ge_version + \"\"\"/features/data_docs.html\n local_site:\n class_name: SiteBuilder\n store_backend:\n class_name: FixedLengthTupleFilesystemStoreBackend\n base_directory: uncommitted/data_docs/local_site/\n site_index_builder:\n show_cta_footer: True\n\"\"\"\n\nPROJECT_TEMPLATE = PROJECT_HELP_COMMENT + PROJECT_OPTIONAL_CONFIG_COMMENT\n", "path": "great_expectations/data_context/templates.py"}]} | 2,635 | 951 |
gh_patches_debug_659 | rasdani/github-patches | git_diff | pex-tool__pex-2214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.142
On the docket:
+ [x] KeyError when locking awscli on Python 3.11 #2211
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.141"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.141"
+__version__ = "2.1.142"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.141\"\n+__version__ = \"2.1.142\"\n", "issue": "Release 2.1.142\nOn the docket:\r\n+ [x] KeyError when locking awscli on Python 3.11 #2211\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.141\"\n", "path": "pex/version.py"}]} | 621 | 98 |
gh_patches_debug_34028 | rasdani/github-patches | git_diff | ansible__awx-15011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing tags and skip_tags on awx.awx.workflow_launch module
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)
### Bug Summary
The UI and API support launching a workflow with `skip_tags` and `job_tags` set. This is not currently possible with the `workflow_launch` module.
### AWX version
devel
### Select the relevant components
- [ ] UI
- [ ] UI (tech preview)
- [ ] API
- [ ] Docs
- [X] Collection
- [ ] CLI
- [ ] Other
### Installation method
N/A
### Modifications
no
### Ansible version
2.15.2
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
<img width="1106" alt="Screenshot 2024-03-19 at 11 26 20" src="https://github.com/ansible/awx/assets/9056568/878ee7f0-57d3-4365-9353-46aa6f99d009">
Through the UI you can set job_tags or skip_tags if the relevant `ask_for_` is true.
In the collection this is not possible
### Expected results
The `skip_tags` and `tags` option should be possible as below
```
- name: Kickoff workflow
awx.awx.workflow_launch:
name: wf_test
wait: false
skip_tags:
- sometag
tags:
- mytag
```
### Actual results
The options are not included on the module
### Additional information
_No response_
</issue>
<code>
[start of awx_collection/plugins/modules/workflow_launch.py]
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7
8 __metaclass__ = type
9
10 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
11
12 DOCUMENTATION = '''
13 ---
14 module: workflow_launch
15 author: "John Westcott IV (@john-westcott-iv)"
16 short_description: Run a workflow in Automation Platform Controller
17 description:
18 - Launch an Automation Platform Controller workflows. See
19 U(https://www.ansible.com/tower) for an overview.
20 options:
21 name:
22 description:
23 - The name of the workflow template to run.
24 required: True
25 type: str
26 aliases:
27 - workflow_template
28 organization:
29 description:
30 - Organization name, ID, or named URL the workflow job template exists in.
31 - Used to help lookup the object, cannot be modified using this module.
32 - If not provided, will lookup by name only, which does not work with duplicates.
33 type: str
34 inventory:
35 description:
36 - Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set.
37 type: str
38 limit:
39 description:
40 - Limit to use for the I(job_template).
41 type: str
42 scm_branch:
43 description:
44 - A specific branch of the SCM project to run the template on.
45 - This is only applicable if your project allows for branch override.
46 type: str
47 extra_vars:
48 description:
49 - Any extra vars required to launch the job.
50 type: dict
51 wait:
52 description:
53 - Wait for the workflow to complete.
54 default: True
55 type: bool
56 interval:
57 description:
58 - The interval to request an update from the controller.
59 required: False
60 default: 2
61 type: float
62 timeout:
63 description:
64 - If waiting for the workflow to complete this will abort after this
65 amount of seconds
66 type: int
67 extends_documentation_fragment: awx.awx.auth
68 '''
69
70 RETURN = '''
71 job_info:
72 description: dictionary containing information about the workflow executed
73 returned: If workflow launched
74 type: dict
75 '''
76
77
78 EXAMPLES = '''
79 - name: Launch a workflow with a timeout of 10 seconds
80 workflow_launch:
81 workflow_template: "Test Workflow"
82 timeout: 10
83
84 - name: Launch a Workflow with extra_vars without waiting
85 workflow_launch:
86 workflow_template: "Test workflow"
87 extra_vars:
88 var1: My First Variable
89 var2: My Second Variable
90 wait: False
91 '''
92
93 from ..module_utils.controller_api import ControllerAPIModule
94
95
96 def main():
97 # Any additional arguments that are not fields of the item can be added here
98 argument_spec = dict(
99 name=dict(required=True, aliases=['workflow_template']),
100 organization=dict(),
101 inventory=dict(),
102 limit=dict(),
103 scm_branch=dict(),
104 extra_vars=dict(type='dict'),
105 wait=dict(required=False, default=True, type='bool'),
106 interval=dict(required=False, default=2.0, type='float'),
107 timeout=dict(required=False, type='int'),
108 )
109
110 # Create a module for ourselves
111 module = ControllerAPIModule(argument_spec=argument_spec)
112
113 optional_args = {}
114 # Extract our parameters
115 name = module.params.get('name')
116 organization = module.params.get('organization')
117 inventory = module.params.get('inventory')
118 wait = module.params.get('wait')
119 interval = module.params.get('interval')
120 timeout = module.params.get('timeout')
121
122 for field_name in (
123 'limit',
124 'extra_vars',
125 'scm_branch',
126 ):
127 field_val = module.params.get(field_name)
128 if field_val is not None:
129 optional_args[field_name] = field_val
130
131 # Create a datastructure to pass into our job launch
132 post_data = {}
133 for arg_name, arg_value in optional_args.items():
134 if arg_value:
135 post_data[arg_name] = arg_value
136
137 # Attempt to look up the related items the user specified (these will fail the module if not found)
138 if inventory:
139 post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)
140
141 # Attempt to look up job_template based on the provided name
142 lookup_data = {}
143 if organization:
144 lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)
145 workflow_job_template = module.get_one('workflow_job_templates', name_or_id=name, data=lookup_data)
146
147 if workflow_job_template is None:
148 module.fail_json(msg="Unable to find workflow job template")
149
150 # The API will allow you to submit values to a jb launch that are not prompt on launch.
151 # Therefore, we will test to see if anything is set which is not prompt on launch and fail.
152 check_vars_to_prompts = {
153 'inventory': 'ask_inventory_on_launch',
154 'limit': 'ask_limit_on_launch',
155 'scm_branch': 'ask_scm_branch_on_launch',
156 }
157
158 param_errors = []
159 for variable_name, prompt in check_vars_to_prompts.items():
160 if variable_name in post_data and not workflow_job_template[prompt]:
161 param_errors.append("The field {0} was specified but the workflow job template does not allow for it to be overridden".format(variable_name))
162 # Check if Either ask_variables_on_launch, or survey_enabled is enabled for use of extra vars.
163 if module.params.get('extra_vars') and not (workflow_job_template['ask_variables_on_launch'] or workflow_job_template['survey_enabled']):
164 param_errors.append("The field extra_vars was specified but the workflow job template does not allow for it to be overridden")
165 if len(param_errors) > 0:
166 module.fail_json(msg="Parameters specified which can not be passed into workflow job template, see errors for details", errors=param_errors)
167
168 # Launch the job
169 result = module.post_endpoint(workflow_job_template['related']['launch'], data=post_data)
170
171 if result['status_code'] != 201:
172 module.fail_json(msg="Failed to launch workflow, see response for details", response=result)
173
174 module.json_output['changed'] = True
175 module.json_output['id'] = result['json']['id']
176 module.json_output['status'] = result['json']['status']
177 # This is for backwards compatability
178 module.json_output['job_info'] = {'id': result['json']['id']}
179
180 if not wait:
181 module.exit_json(**module.json_output)
182
183 # Invoke wait function
184 module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Workflow Job', timeout=timeout, interval=interval)
185
186 module.exit_json(**module.json_output)
187
188
189 if __name__ == '__main__':
190 main()
191
[end of awx_collection/plugins/modules/workflow_launch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awx_collection/plugins/modules/workflow_launch.py b/awx_collection/plugins/modules/workflow_launch.py
--- a/awx_collection/plugins/modules/workflow_launch.py
+++ b/awx_collection/plugins/modules/workflow_launch.py
@@ -39,6 +39,16 @@
description:
- Limit to use for the I(job_template).
type: str
+ tags:
+ description:
+ - Specific tags to apply from the I(job_template).
+ type: list
+ elements: str
+ skip_tags:
+ description:
+ - Specific tags to skip from the I(job_template).
+ type: list
+ elements: str
scm_branch:
description:
- A specific branch of the SCM project to run the template on.
@@ -100,6 +110,8 @@
organization=dict(),
inventory=dict(),
limit=dict(),
+ tags=dict(type='list', elements='str'),
+ skip_tags=dict(type='list', elements='str'),
scm_branch=dict(),
extra_vars=dict(type='dict'),
wait=dict(required=False, default=True, type='bool'),
@@ -128,6 +140,14 @@
if field_val is not None:
optional_args[field_name] = field_val
+ # Special treatment of tags parameters
+ job_tags = module.params.get('tags')
+ if job_tags is not None:
+ optional_args['job_tags'] = ",".join(job_tags)
+ skip_tags = module.params.get('skip_tags')
+ if skip_tags is not None:
+ optional_args['skip_tags'] = ",".join(skip_tags)
+
# Create a datastructure to pass into our job launch
post_data = {}
for arg_name, arg_value in optional_args.items():
@@ -152,6 +172,8 @@
check_vars_to_prompts = {
'inventory': 'ask_inventory_on_launch',
'limit': 'ask_limit_on_launch',
+ 'job_tags': 'ask_tags_on_launch',
+ 'skip_tags': 'ask_skip_tags_on_launch',
'scm_branch': 'ask_scm_branch_on_launch',
}
| {"golden_diff": "diff --git a/awx_collection/plugins/modules/workflow_launch.py b/awx_collection/plugins/modules/workflow_launch.py\n--- a/awx_collection/plugins/modules/workflow_launch.py\n+++ b/awx_collection/plugins/modules/workflow_launch.py\n@@ -39,6 +39,16 @@\n description:\n - Limit to use for the I(job_template).\n type: str\n+ tags:\n+ description:\n+ - Specific tags to apply from the I(job_template).\n+ type: list\n+ elements: str\n+ skip_tags:\n+ description:\n+ - Specific tags to skip from the I(job_template).\n+ type: list\n+ elements: str\n scm_branch:\n description:\n - A specific branch of the SCM project to run the template on.\n@@ -100,6 +110,8 @@\n organization=dict(),\n inventory=dict(),\n limit=dict(),\n+ tags=dict(type='list', elements='str'),\n+ skip_tags=dict(type='list', elements='str'),\n scm_branch=dict(),\n extra_vars=dict(type='dict'),\n wait=dict(required=False, default=True, type='bool'),\n@@ -128,6 +140,14 @@\n if field_val is not None:\n optional_args[field_name] = field_val\n \n+ # Special treatment of tags parameters\n+ job_tags = module.params.get('tags')\n+ if job_tags is not None:\n+ optional_args['job_tags'] = \",\".join(job_tags)\n+ skip_tags = module.params.get('skip_tags')\n+ if skip_tags is not None:\n+ optional_args['skip_tags'] = \",\".join(skip_tags)\n+\n # Create a datastructure to pass into our job launch\n post_data = {}\n for arg_name, arg_value in optional_args.items():\n@@ -152,6 +172,8 @@\n check_vars_to_prompts = {\n 'inventory': 'ask_inventory_on_launch',\n 'limit': 'ask_limit_on_launch',\n+ 'job_tags': 'ask_tags_on_launch',\n+ 'skip_tags': 'ask_skip_tags_on_launch',\n 'scm_branch': 'ask_scm_branch_on_launch',\n }\n", "issue": "Missing tags and skip_tags on awx.awx.workflow_launch module\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\n\n### Bug Summary\n\nThe UI and API support launching a workflow with `skip_tags` and `job_tags` set. 
This is not currently possible with the `workflow_launch` module.\n\n### AWX version\n\ndevel\n\n### Select the relevant components\n\n- [ ] UI\n- [ ] UI (tech preview)\n- [ ] API\n- [ ] Docs\n- [X] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\nN/A\n\n### Modifications\n\nno\n\n### Ansible version\n\n2.15.2\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n<img width=\"1106\" alt=\"Screenshot 2024-03-19 at 11 26 20\" src=\"https://github.com/ansible/awx/assets/9056568/878ee7f0-57d3-4365-9353-46aa6f99d009\">\r\n\r\nThrough the UI you can set job_tags or skip_tags if the relevant `ask_for_` is true.\r\n\r\nIn the collection this is not possible\n\n### Expected results\n\nThe `skip_tags` and `tags` option should be possible as below\r\n```\r\n- name: Kickoff workflow\r\n awx.awx.workflow_launch:\r\n name: wf_test\r\n wait: false\r\n skip_tags:\r\n - sometag\r\n tags:\r\n - mytag\r\n```\n\n### Actual results\n\nThe options are not included on the module\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: workflow_launch\nauthor: \"John Westcott IV (@john-westcott-iv)\"\nshort_description: Run a workflow in Automation Platform Controller\ndescription:\n - Launch an Automation Platform Controller workflows. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name of the workflow template to run.\n required: True\n type: str\n aliases:\n - workflow_template\n organization:\n description:\n - Organization name, ID, or named URL the workflow job template exists in.\n - Used to help lookup the object, cannot be modified using this module.\n - If not provided, will lookup by name only, which does not work with duplicates.\n type: str\n inventory:\n description:\n - Inventory name, ID, or named URL to use for the job ran with this workflow, only used if prompt for inventory is set.\n type: str\n limit:\n description:\n - Limit to use for the I(job_template).\n type: str\n scm_branch:\n description:\n - A specific branch of the SCM project to run the template on.\n - This is only applicable if your project allows for branch override.\n type: str\n extra_vars:\n description:\n - Any extra vars required to launch the job.\n type: dict\n wait:\n description:\n - Wait for the workflow to complete.\n default: True\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n required: False\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the workflow to complete this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nRETURN = '''\njob_info:\n description: dictionary containing information about the workflow executed\n returned: If workflow launched\n type: dict\n'''\n\n\nEXAMPLES = '''\n- name: Launch a workflow with a timeout of 10 seconds\n workflow_launch:\n workflow_template: \"Test Workflow\"\n timeout: 10\n\n- name: Launch a Workflow with extra_vars without waiting\n workflow_launch:\n workflow_template: \"Test workflow\"\n extra_vars:\n var1: My First Variable\n var2: My Second 
Variable\n wait: False\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True, aliases=['workflow_template']),\n organization=dict(),\n inventory=dict(),\n limit=dict(),\n scm_branch=dict(),\n extra_vars=dict(type='dict'),\n wait=dict(required=False, default=True, type='bool'),\n interval=dict(required=False, default=2.0, type='float'),\n timeout=dict(required=False, type='int'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n optional_args = {}\n # Extract our parameters\n name = module.params.get('name')\n organization = module.params.get('organization')\n inventory = module.params.get('inventory')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n for field_name in (\n 'limit',\n 'extra_vars',\n 'scm_branch',\n ):\n field_val = module.params.get(field_name)\n if field_val is not None:\n optional_args[field_name] = field_val\n\n # Create a datastructure to pass into our job launch\n post_data = {}\n for arg_name, arg_value in optional_args.items():\n if arg_value:\n post_data[arg_name] = arg_value\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n if inventory:\n post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)\n\n # Attempt to look up job_template based on the provided name\n lookup_data = {}\n if organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n workflow_job_template = module.get_one('workflow_job_templates', name_or_id=name, data=lookup_data)\n\n if workflow_job_template is None:\n module.fail_json(msg=\"Unable to find workflow job template\")\n\n # The API will allow you to submit values to a jb launch that are not prompt on launch.\n # Therefore, we will test to see if anything is set which is not prompt on launch and fail.\n check_vars_to_prompts = {\n 'inventory': 'ask_inventory_on_launch',\n 'limit': 'ask_limit_on_launch',\n 'scm_branch': 'ask_scm_branch_on_launch',\n }\n\n param_errors = []\n for variable_name, prompt in check_vars_to_prompts.items():\n if variable_name in post_data and not workflow_job_template[prompt]:\n param_errors.append(\"The field {0} was specified but the workflow job template does not allow for it to be overridden\".format(variable_name))\n # Check if Either ask_variables_on_launch, or survey_enabled is enabled for use of extra vars.\n if module.params.get('extra_vars') and not (workflow_job_template['ask_variables_on_launch'] or workflow_job_template['survey_enabled']):\n param_errors.append(\"The field extra_vars was specified but the workflow job template does not allow for it to be overridden\")\n if len(param_errors) > 0:\n module.fail_json(msg=\"Parameters specified which can not be passed into workflow job template, see errors for details\", errors=param_errors)\n\n # Launch the job\n result = module.post_endpoint(workflow_job_template['related']['launch'], data=post_data)\n\n if result['status_code'] != 201:\n module.fail_json(msg=\"Failed to launch workflow, see response for details\", response=result)\n\n module.json_output['changed'] = True\n module.json_output['id'] = result['json']['id']\n module.json_output['status'] = result['json']['status']\n # This is for backwards compatability\n module.json_output['job_info'] = 
{'id': result['json']['id']}\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Workflow Job', timeout=timeout, interval=interval)\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/workflow_launch.py"}]} | 2,991 | 477 |
gh_patches_debug_22758 | rasdani/github-patches | git_diff | ckan__ckan-5647 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Regression-2.9] Ckan Generate Config
**CKAN version**
2.9
**Describe the bug**
In ckan 2.9, ckan generate config does not make a config file that can then be immediately edited.
**Steps to reproduce**
In Ckan <= 2.8.5,
```
"$CKAN_HOME"/bin/paster make-config ckan "$CONFIG"
"$CKAN_HOME"/bin/paster --plugin=ckan config-tool "$CONFIG" \
"sqlalchemy.url = ${DATABASE_URL}" \
"solr_url = ${SOLR_URL}" \
"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}" \
"ckan.redis.url = ${REDIS_URL}" \
...
```
worked, regardless of the database url.
In ckan 2.9:
```
"$CKAN_HOME"/bin/ckan generate config "$CONFIG"
"$CKAN_HOME"/bin/ckan config-tool "$CONFIG" \
"ckan.storage_path = /var/lib/ckan" \
"sqlalchemy.url = ${CKAN_DB_URL}"\
```
This fails because the database is not reachable (and potentially the site url is not defined, IIRC).
**Expected behavior**
Either:
* Ckan generate config should provide the basic parameters required to run config-tool
* config-tool should be able to edit an incomplete ckan.ini file, by not attempting to contact the database, redis, or solr servers.
```
ckan generate config $CKAN_INI
ckan config-tool $CKAN_INI \
[options]
```
should work.
**Additional details**
If possible, please provide the full stack trace of the error raised, or add screenshots to help explain your problem.
This is my current work-around, which is ugly:
```
if [ ! -e "$CONFIG" ]; then
"$CKAN_HOME"/bin/ckan generate config "$CONFIG"
# site url can't be set with the config tool
perl -pi -e "s|^(ckan.site_url =)$|\1 ${SITE_SCHEME}${SITE_HOST}|;" $CONFIG
perl -pi -e "s|^#?(solr_url =).*$|\1 ${SOLR_URL}|;" $CONFIG
perl -pi -e "s|^#?(ckan.redis.url =).*|\1 ${REDIS_URL}|;" $CONFIG
# perl has issues with @db in the replacement
sed -i -e "s|^sqlalchemy.url =.*$|sqlalchemy.url = ${CKAN_DB_URL}|;" $CONFIG
fi
"$CKAN_HOME"/bin/ckan config-tool "$CONFIG" \
"ckan.storage_path = /var/lib/ckan" \
"sqlalchemy.url = ${CKAN_DB_URL}"\
"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}" \
"solr_url = ${SOLR_URL}" \
```
</issue>
<code>
[start of ckan/cli/cli.py]
1 # encoding: utf-8
2
3 import logging
4 from collections import defaultdict
5 from pkg_resources import iter_entry_points
6
7 import six
8 import click
9 import sys
10
11 import ckan.plugins as p
12 import ckan.cli as ckan_cli
13 from ckan.config.middleware import make_app
14 from ckan.exceptions import CkanConfigurationException
15 from ckan.cli import (
16 config_tool,
17 jobs,
18 front_end_build,
19 db, search_index, server,
20 profile,
21 asset,
22 sysadmin,
23 translation,
24 dataset,
25 views,
26 plugin_info,
27 notify,
28 tracking,
29 minify,
30 less,
31 generate,
32 user
33 )
34
35 from ckan.cli import seed
36
37 log = logging.getLogger(__name__)
38
39
40 class CkanCommand(object):
41
42 def __init__(self, conf=None):
43 # Don't import `load_config` by itself, rather call it using
44 # module so that it can be patched during tests
45 self.config = ckan_cli.load_config(conf)
46 self.app = make_app(self.config)
47
48
49 def _get_commands_from_plugins(plugins):
50 for plugin in plugins:
51 for cmd in plugin.get_commands():
52 cmd._ckan_meta = {
53 u'name': plugin.name,
54 u'type': u'plugin'
55 }
56 yield cmd
57
58
59 def _get_commands_from_entry_point(entry_point=u'ckan.click_command'):
60 registered_entries = {}
61 for entry in iter_entry_points(entry_point):
62 if entry.name in registered_entries:
63 p.toolkit.error_shout((
64 u'Attempt to override entry_point `{name}`.\n'
65 u'First encounter:\n\t{first!r}\n'
66 u'Second encounter:\n\t{second!r}\n'
67 u'Either uninstall one of mentioned extensions or update'
68 u' corresponding `setup.py` and re-install the extension.'
69 ).format(
70 name=entry.name,
71 first=registered_entries[entry.name].dist,
72 second=entry.dist))
73 raise click.Abort()
74 registered_entries[entry.name] = entry
75
76 cmd = entry.load()
77 cmd._ckan_meta = {
78 u'name': entry.name,
79 u'type': u'entry_point'
80 }
81 yield cmd
82
83
84 def _init_ckan_config(ctx, param, value):
85 is_help = u'--help' in sys.argv
86 no_config = len(sys.argv) > 1 and sys.argv[1] in (
87 u'generate', u'config-tool')
88
89 try:
90 ctx.obj = CkanCommand(value)
91 except CkanConfigurationException as e:
92 # Some commands don't require the config loaded
93 if no_config or is_help:
94 return
95 else:
96 p.toolkit.error_shout(e)
97 raise click.Abort()
98
99 if six.PY2:
100 ctx.meta["flask_app"] = ctx.obj.app.apps["flask_app"]._wsgi_app
101 else:
102 ctx.meta["flask_app"] = ctx.obj.app._wsgi_app
103
104 for cmd in _get_commands_from_entry_point():
105 ctx.command.add_command(cmd)
106
107 plugins = p.PluginImplementations(p.IClick)
108 for cmd in _get_commands_from_plugins(plugins):
109 ctx.command.add_command(cmd)
110
111
112 click_config_option = click.option(
113 u'-c',
114 u'--config',
115 default=None,
116 metavar=u'CONFIG',
117 help=u'Config file to use (default: development.ini)',
118 is_eager=True,
119 callback=_init_ckan_config
120 )
121
122
123 class CustomGroup(click.Group):
124 _section_titles = {
125 u'plugin': u'Plugins',
126 u'entry_point': u'Entry points',
127 }
128
129 def format_commands(self, ctx, formatter):
130 # Without any arguments click skips option callbacks.
131 self.parse_args(ctx, [u'help'])
132
133 commands = []
134 ext_commands = defaultdict(lambda: defaultdict(list))
135
136 for subcommand in self.list_commands(ctx):
137 cmd = self.get_command(ctx, subcommand)
138 if cmd is None:
139 continue
140 help = cmd.short_help or u''
141
142 meta = getattr(cmd, u'_ckan_meta', None)
143 if meta:
144 ext_commands[meta[u'type']][meta[u'name']].append(
145 (subcommand, help))
146 else:
147 commands.append((subcommand, help))
148
149 if commands:
150 with formatter.section(u'Commands'):
151 formatter.write_dl(commands)
152
153 for section, group in ext_commands.items():
154 with formatter.section(self._section_titles.get(section, section)):
155 for _ext, rows in group.items():
156 formatter.write_dl(rows)
157
158
159 @click.group(cls=CustomGroup)
160 @click.help_option(u'-h', u'--help')
161 @click_config_option
162 def ckan(config, *args, **kwargs):
163 pass
164
165
166 ckan.add_command(jobs.jobs)
167 ckan.add_command(config_tool.config_tool)
168 ckan.add_command(front_end_build.front_end_build)
169 ckan.add_command(server.run)
170 ckan.add_command(profile.profile)
171 ckan.add_command(seed.seed)
172 ckan.add_command(db.db)
173 ckan.add_command(search_index.search_index)
174 ckan.add_command(sysadmin.sysadmin)
175 ckan.add_command(asset.asset)
176 ckan.add_command(translation.translation)
177 ckan.add_command(dataset.dataset)
178 ckan.add_command(views.views)
179 ckan.add_command(plugin_info.plugin_info)
180 ckan.add_command(notify.notify)
181 ckan.add_command(tracking.tracking)
182 ckan.add_command(minify.minify)
183 ckan.add_command(less.less)
184 ckan.add_command(generate.generate)
185 ckan.add_command(user.user)
186
[end of ckan/cli/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/cli/cli.py b/ckan/cli/cli.py
--- a/ckan/cli/cli.py
+++ b/ckan/cli/cli.py
@@ -36,6 +36,12 @@
log = logging.getLogger(__name__)
+_no_config_commands = [
+ [u'config-tool'],
+ [u'generate', u'config'],
+ [u'generate', u'extension'],
+]
+
class CkanCommand(object):
@@ -83,18 +89,20 @@
def _init_ckan_config(ctx, param, value):
is_help = u'--help' in sys.argv
- no_config = len(sys.argv) > 1 and sys.argv[1] in (
- u'generate', u'config-tool')
+ no_config = False
+ if len(sys.argv) > 1:
+ for cmd in _no_config_commands:
+ if sys.argv[1:len(cmd) + 1] == cmd:
+ no_config = True
+ break
+ if no_config or is_help:
+ return
try:
ctx.obj = CkanCommand(value)
except CkanConfigurationException as e:
- # Some commands don't require the config loaded
- if no_config or is_help:
- return
- else:
- p.toolkit.error_shout(e)
- raise click.Abort()
+ p.toolkit.error_shout(e)
+ raise click.Abort()
if six.PY2:
ctx.meta["flask_app"] = ctx.obj.app.apps["flask_app"]._wsgi_app
| {"golden_diff": "diff --git a/ckan/cli/cli.py b/ckan/cli/cli.py\n--- a/ckan/cli/cli.py\n+++ b/ckan/cli/cli.py\n@@ -36,6 +36,12 @@\n \n log = logging.getLogger(__name__)\n \n+_no_config_commands = [\n+ [u'config-tool'],\n+ [u'generate', u'config'],\n+ [u'generate', u'extension'],\n+]\n+\n \n class CkanCommand(object):\n \n@@ -83,18 +89,20 @@\n \n def _init_ckan_config(ctx, param, value):\n is_help = u'--help' in sys.argv\n- no_config = len(sys.argv) > 1 and sys.argv[1] in (\n- u'generate', u'config-tool')\n+ no_config = False\n+ if len(sys.argv) > 1:\n+ for cmd in _no_config_commands:\n+ if sys.argv[1:len(cmd) + 1] == cmd:\n+ no_config = True\n+ break\n+ if no_config or is_help:\n+ return\n \n try:\n ctx.obj = CkanCommand(value)\n except CkanConfigurationException as e:\n- # Some commands don't require the config loaded\n- if no_config or is_help:\n- return\n- else:\n- p.toolkit.error_shout(e)\n- raise click.Abort()\n+ p.toolkit.error_shout(e)\n+ raise click.Abort()\n \n if six.PY2:\n ctx.meta[\"flask_app\"] = ctx.obj.app.apps[\"flask_app\"]._wsgi_app\n", "issue": "[Regression-2.9] Ckan Generate Config \n**CKAN version**\r\n\r\n2.9\r\n\r\n**Describe the bug**\r\n\r\nIn ckan 2.9, ckan generate config does not make a config file that can then be immediately edited. \r\n\r\n**Steps to reproduce**\r\nIn Ckan <= 2.8.5, \r\n```\r\n \"$CKAN_HOME\"/bin/paster make-config ckan \"$CONFIG\"\r\n\r\n \"$CKAN_HOME\"/bin/paster --plugin=ckan config-tool \"$CONFIG\" \\\r\n \"sqlalchemy.url = ${DATABASE_URL}\" \\\r\n \"solr_url = ${SOLR_URL}\" \\\r\n \"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}\" \\\r\n \"ckan.redis.url = ${REDIS_URL}\" \\\r\n ...\r\n```\r\nworked, regardless of the database url. \r\n\r\nIn ckan 2.9:\r\n```\r\n \"$CKAN_HOME\"/bin/ckan generate config \"$CONFIG\"\r\n\r\n \"$CKAN_HOME\"/bin/ckan config-tool \"$CONFIG\" \\\r\n \"ckan.storage_path = /var/lib/ckan\" \\\r\n \"sqlalchemy.url = ${CKAN_DB_URL}\"\\\r\n```\r\nThis fails, because the database is not reachable (and potentially the site url is not defined, IIRC)\r\n\r\n**Expected behavior**\r\n\r\nEither:\r\n* Ckan generate config should provide the basic parameters required to run config-tool\r\n* config-tool should be able to edit an incomplete ckan.ini file, by not attempting to contact the database, redis, or solr servers. \r\n\r\n```\r\nckan generate config $CKAN_INI\r\nckan config-tool $CKAN_INI \\ \r\n [options] \r\n```\r\nshould work. \r\n\r\n**Additional details**\r\nIf possible, please provide the full stack trace of the error raised, or add screenshots to help explain your problem.\r\n\r\nThis is my current work-around, which is ugly:\r\n```\r\n if [ ! 
-e \"$CONFIG\" ]; then\r\n \"$CKAN_HOME\"/bin/ckan generate config \"$CONFIG\"\r\n # site url can't be set with the config tool\r\n perl -pi -e \"s|^(ckan.site_url =)$|\\1 ${SITE_SCHEME}${SITE_HOST}|;\" $CONFIG\r\n perl -pi -e \"s|^#?(solr_url =).*$|\\1 ${SOLR_URL}|;\" $CONFIG\r\n perl -pi -e \"s|^#?(ckan.redis.url =).*|\\1 ${REDIS_URL}|;\" $CONFIG\r\n # perl has issues with @db in the replacement\r\n sed -i -e \"s|^sqlalchemy.url =.*$|sqlalchemy.url = ${CKAN_DB_URL}|;\" $CONFIG\r\n\r\n fi\r\n\r\n\r\n \"$CKAN_HOME\"/bin/ckan config-tool \"$CONFIG\" \\\r\n \"ckan.storage_path = /var/lib/ckan\" \\\r\n \"sqlalchemy.url = ${CKAN_DB_URL}\"\\\r\n \"ckan.site_url = ${SITE_SCHEME}${SITE_HOST}\" \\\r\n \"solr_url = ${SOLR_URL}\" \\\r\n```\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom collections import defaultdict\nfrom pkg_resources import iter_entry_points\n\nimport six\nimport click\nimport sys\n\nimport ckan.plugins as p\nimport ckan.cli as ckan_cli\nfrom ckan.config.middleware import make_app\nfrom ckan.exceptions import CkanConfigurationException\nfrom ckan.cli import (\n config_tool,\n jobs,\n front_end_build,\n db, search_index, server,\n profile,\n asset,\n sysadmin,\n translation,\n dataset,\n views,\n plugin_info,\n notify,\n tracking,\n minify,\n less,\n generate,\n user\n)\n\nfrom ckan.cli import seed\n\nlog = logging.getLogger(__name__)\n\n\nclass CkanCommand(object):\n\n def __init__(self, conf=None):\n # Don't import `load_config` by itself, rather call it using\n # module so that it can be patched during tests\n self.config = ckan_cli.load_config(conf)\n self.app = make_app(self.config)\n\n\ndef _get_commands_from_plugins(plugins):\n for plugin in plugins:\n for cmd in plugin.get_commands():\n cmd._ckan_meta = {\n u'name': plugin.name,\n u'type': u'plugin'\n }\n yield cmd\n\n\ndef _get_commands_from_entry_point(entry_point=u'ckan.click_command'):\n registered_entries = {}\n for entry in iter_entry_points(entry_point):\n if entry.name in registered_entries:\n p.toolkit.error_shout((\n u'Attempt to override entry_point `{name}`.\\n'\n u'First encounter:\\n\\t{first!r}\\n'\n u'Second encounter:\\n\\t{second!r}\\n'\n u'Either uninstall one of mentioned extensions or update'\n u' corresponding `setup.py` and re-install the extension.'\n ).format(\n name=entry.name,\n first=registered_entries[entry.name].dist,\n second=entry.dist))\n raise click.Abort()\n registered_entries[entry.name] = entry\n\n cmd = entry.load()\n cmd._ckan_meta = {\n u'name': entry.name,\n u'type': u'entry_point'\n }\n yield cmd\n\n\ndef _init_ckan_config(ctx, param, value):\n is_help = u'--help' in sys.argv\n no_config = len(sys.argv) > 1 and sys.argv[1] in (\n u'generate', u'config-tool')\n\n try:\n ctx.obj = CkanCommand(value)\n except CkanConfigurationException as e:\n # Some commands don't require the config loaded\n if no_config or is_help:\n return\n else:\n p.toolkit.error_shout(e)\n raise click.Abort()\n\n if six.PY2:\n ctx.meta[\"flask_app\"] = ctx.obj.app.apps[\"flask_app\"]._wsgi_app\n else:\n ctx.meta[\"flask_app\"] = ctx.obj.app._wsgi_app\n\n for cmd in _get_commands_from_entry_point():\n ctx.command.add_command(cmd)\n\n plugins = p.PluginImplementations(p.IClick)\n for cmd in _get_commands_from_plugins(plugins):\n ctx.command.add_command(cmd)\n\n\nclick_config_option = click.option(\n u'-c',\n u'--config',\n default=None,\n metavar=u'CONFIG',\n help=u'Config file to use (default: development.ini)',\n is_eager=True,\n callback=_init_ckan_config\n)\n\n\nclass 
CustomGroup(click.Group):\n _section_titles = {\n u'plugin': u'Plugins',\n u'entry_point': u'Entry points',\n }\n\n def format_commands(self, ctx, formatter):\n # Without any arguments click skips option callbacks.\n self.parse_args(ctx, [u'help'])\n\n commands = []\n ext_commands = defaultdict(lambda: defaultdict(list))\n\n for subcommand in self.list_commands(ctx):\n cmd = self.get_command(ctx, subcommand)\n if cmd is None:\n continue\n help = cmd.short_help or u''\n\n meta = getattr(cmd, u'_ckan_meta', None)\n if meta:\n ext_commands[meta[u'type']][meta[u'name']].append(\n (subcommand, help))\n else:\n commands.append((subcommand, help))\n\n if commands:\n with formatter.section(u'Commands'):\n formatter.write_dl(commands)\n\n for section, group in ext_commands.items():\n with formatter.section(self._section_titles.get(section, section)):\n for _ext, rows in group.items():\n formatter.write_dl(rows)\n\n\[email protected](cls=CustomGroup)\[email protected]_option(u'-h', u'--help')\n@click_config_option\ndef ckan(config, *args, **kwargs):\n pass\n\n\nckan.add_command(jobs.jobs)\nckan.add_command(config_tool.config_tool)\nckan.add_command(front_end_build.front_end_build)\nckan.add_command(server.run)\nckan.add_command(profile.profile)\nckan.add_command(seed.seed)\nckan.add_command(db.db)\nckan.add_command(search_index.search_index)\nckan.add_command(sysadmin.sysadmin)\nckan.add_command(asset.asset)\nckan.add_command(translation.translation)\nckan.add_command(dataset.dataset)\nckan.add_command(views.views)\nckan.add_command(plugin_info.plugin_info)\nckan.add_command(notify.notify)\nckan.add_command(tracking.tracking)\nckan.add_command(minify.minify)\nckan.add_command(less.less)\nckan.add_command(generate.generate)\nckan.add_command(user.user)\n", "path": "ckan/cli/cli.py"}]} | 2,826 | 359 |
gh_patches_debug_27107 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3034 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add small delay before updating completion
A small delay before updating the completion might help to reduce load when a user is e.g. holding backspace.
</issue>
<code>
[start of qutebrowser/completion/completer.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Completer attached to a CompletionView."""
21
22 import attr
23 from PyQt5.QtCore import pyqtSlot, QObject, QTimer
24
25 from qutebrowser.config import config
26 from qutebrowser.commands import cmdutils, runners
27 from qutebrowser.utils import log, utils, debug
28 from qutebrowser.completion.models import miscmodels
29
30
31 @attr.s
32 class CompletionInfo:
33
34 """Context passed into all completion functions."""
35
36 config = attr.ib()
37 keyconf = attr.ib()
38
39
40 class Completer(QObject):
41
42 """Completer which manages completions in a CompletionView.
43
44 Attributes:
45 _cmd: The statusbar Command object this completer belongs to.
46 _ignore_change: Whether to ignore the next completion update.
47 _timer: The timer used to trigger the completion update.
48 _last_cursor_pos: The old cursor position so we avoid double completion
49 updates.
50 _last_text: The old command text so we avoid double completion updates.
51 _last_completion_func: The completion function used for the last text.
52 """
53
54 def __init__(self, cmd, parent=None):
55 super().__init__(parent)
56 self._cmd = cmd
57 self._ignore_change = False
58 self._timer = QTimer()
59 self._timer.setSingleShot(True)
60 self._timer.setInterval(0)
61 self._timer.timeout.connect(self._update_completion)
62 self._last_cursor_pos = None
63 self._last_text = None
64 self._last_completion_func = None
65 self._cmd.update_completion.connect(self.schedule_completion_update)
66
67 def __repr__(self):
68 return utils.get_repr(self)
69
70 def _model(self):
71 """Convenience method to get the current completion model."""
72 completion = self.parent()
73 return completion.model()
74
75 def _get_new_completion(self, before_cursor, under_cursor):
76 """Get the completion function based on the current command text.
77
78 Args:
79 before_cursor: The command chunks before the cursor.
80 under_cursor: The command chunk under the cursor.
81
82 Return:
83 A completion model.
84 """
85 if '--' in before_cursor or under_cursor.startswith('-'):
86 # cursor on a flag or after an explicit split (--)
87 return None
88 log.completion.debug("Before removing flags: {}".format(before_cursor))
89 before_cursor = [x for x in before_cursor if not x.startswith('-')]
90 log.completion.debug("After removing flags: {}".format(before_cursor))
91 if not before_cursor:
92 # '|' or 'set|'
93 log.completion.debug('Starting command completion')
94 return miscmodels.command
95 try:
96 cmd = cmdutils.cmd_dict[before_cursor[0]]
97 except KeyError:
98 log.completion.debug("No completion for unknown command: {}"
99 .format(before_cursor[0]))
100 return None
101 argpos = len(before_cursor) - 1
102 try:
103 func = cmd.get_pos_arg_info(argpos).completion
104 except IndexError:
105 log.completion.debug("No completion in position {}".format(argpos))
106 return None
107 return func
108
109 def _quote(self, s):
110 """Quote s if it needs quoting for the commandline.
111
112 Note we don't use shlex.quote because that quotes a lot of shell
113 metachars we don't need to have quoted.
114 """
115 if not s:
116 return "''"
117 elif any(c in s for c in ' "\'\t\n\\'):
118 # use single quotes, and put single quotes into double quotes
119 # the string $'b is then quoted as '$'"'"'b'
120 return "'" + s.replace("'", "'\"'\"'") + "'"
121 else:
122 return s
123
124 def _partition(self):
125 """Divide the commandline text into chunks around the cursor position.
126
127 Return:
128 ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])
129 """
130 text = self._cmd.text()[len(self._cmd.prefix()):]
131 if not text or not text.strip():
132 # Only ":", empty part under the cursor with nothing before/after
133 return [], '', []
134 parser = runners.CommandParser()
135 result = parser.parse(text, fallback=True, keep=True)
136 # pylint: disable=not-an-iterable
137 parts = [x for x in result.cmdline if x]
138 # pylint: enable=not-an-iterable
139 pos = self._cmd.cursorPosition() - len(self._cmd.prefix())
140 pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars
141 log.completion.debug('partitioning {} around position {}'.format(parts,
142 pos))
143 for i, part in enumerate(parts):
144 pos -= len(part)
145 if pos <= 0:
146 if part[pos-1:pos+1].isspace():
147 # cursor is in a space between two existing words
148 parts.insert(i, '')
149 prefix = [x.strip() for x in parts[:i]]
150 center = parts[i].strip()
151 # strip trailing whitepsace included as a separate token
152 postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]
153 log.completion.debug(
154 "partitioned: {} '{}' {}".format(prefix, center, postfix))
155 return prefix, center, postfix
156
157 # We should always return above
158 assert False, parts
159
160 @pyqtSlot(str)
161 def on_selection_changed(self, text):
162 """Change the completed part if a new item was selected.
163
164 Called from the views selectionChanged method.
165
166 Args:
167 text: Newly selected text.
168 """
169 if text is None:
170 return
171 before, center, after = self._partition()
172 log.completion.debug("Changing {} to '{}'".format(center, text))
173 try:
174 maxsplit = cmdutils.cmd_dict[before[0]].maxsplit
175 except (KeyError, IndexError):
176 maxsplit = None
177 if maxsplit is None:
178 text = self._quote(text)
179 model = self._model()
180 if model.count() == 1 and config.val.completion.quick:
181 # If we only have one item, we want to apply it immediately
182 # and go on to the next part.
183 self._change_completed_part(text, before, after, immediate=True)
184 if maxsplit is not None and maxsplit < len(before):
185 # If we are quick-completing the part after maxsplit, don't
186 # keep offering completions (see issue #1519)
187 self._ignore_change = True
188 else:
189 log.completion.debug("Will ignore next completion update.")
190 self._ignore_change = True
191 self._change_completed_part(text, before, after)
192
193 @pyqtSlot()
194 def schedule_completion_update(self):
195 """Schedule updating/enabling completion.
196
197 For performance reasons we don't want to block here, instead we do this
198 in the background.
199 """
200 if (self._cmd.cursorPosition() == self._last_cursor_pos and
201 self._cmd.text() == self._last_text):
202 log.completion.debug("Ignoring update because there were no "
203 "changes.")
204 else:
205 log.completion.debug("Scheduling completion update.")
206 self._timer.start()
207 self._last_cursor_pos = self._cmd.cursorPosition()
208 self._last_text = self._cmd.text()
209
210 @pyqtSlot()
211 def _update_completion(self):
212 """Check if completions are available and activate them."""
213 if self._ignore_change:
214 log.completion.debug("Ignoring completion update because "
215 "ignore_change is True.")
216 self._ignore_change = False
217 return
218
219 completion = self.parent()
220
221 if self._cmd.prefix() != ':':
222 # This is a search or gibberish, so we don't need to complete
223 # anything (yet)
224 # FIXME complete searches
225 # https://github.com/qutebrowser/qutebrowser/issues/32
226 completion.set_model(None)
227 self._last_completion_func = None
228 return
229
230 before_cursor, pattern, after_cursor = self._partition()
231
232 log.completion.debug("Updating completion: {} {} {}".format(
233 before_cursor, pattern, after_cursor))
234
235 pattern = pattern.strip("'\"")
236 func = self._get_new_completion(before_cursor, pattern)
237
238 if func is None:
239 log.completion.debug('Clearing completion')
240 completion.set_model(None)
241 self._last_completion_func = None
242 return
243
244 if func != self._last_completion_func:
245 self._last_completion_func = func
246 args = (x for x in before_cursor[1:] if not x.startswith('-'))
247 with debug.log_time(log.completion,
248 'Starting {} completion'.format(func.__name__)):
249 info = CompletionInfo(config=config.instance,
250 keyconf=config.key_instance)
251 model = func(*args, info=info)
252 with debug.log_time(log.completion, 'Set completion model'):
253 completion.set_model(model)
254
255 completion.set_pattern(pattern)
256
257 def _change_completed_part(self, newtext, before, after, immediate=False):
258 """Change the part we're currently completing in the commandline.
259
260 Args:
261 text: The text to set (string) for the token under the cursor.
262 before: Commandline tokens before the token under the cursor.
263 after: Commandline tokens after the token under the cursor.
264 immediate: True if the text should be completed immediately
265 including a trailing space and we shouldn't continue
266 completing the current item.
267 """
268 text = self._cmd.prefix() + ' '.join(before + [newtext])
269 pos = len(text) + (1 if immediate else 0)
270 if after:
271 text += ' ' + ' '.join(after)
272 elif immediate:
273 # pad with a space if quick-completing the last entry
274 text += ' '
275 log.completion.debug("setting text = '{}', pos = {}".format(text, pos))
276 self._cmd.setText(text)
277 self._cmd.setCursorPosition(pos)
278 self._cmd.setFocus()
279 self._cmd.show_cmd.emit()
280
[end of qutebrowser/completion/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py
--- a/qutebrowser/completion/completer.py
+++ b/qutebrowser/completion/completer.py
@@ -196,14 +196,25 @@
For performance reasons we don't want to block here, instead we do this
in the background.
+
+ We delay the update only if we've already input some text and ignore
+ updates if the text is shorter than completion.min_chars (unless we're
+ hitting backspace in which case updates won't be ignored).
"""
- if (self._cmd.cursorPosition() == self._last_cursor_pos and
+ _cmd, _sep, rest = self._cmd.text().partition(' ')
+ input_length = len(rest)
+ if (0 < input_length < config.val.completion.min_chars and
+ self._cmd.cursorPosition() > self._last_cursor_pos):
+ log.completion.debug("Ignoring update because the length of "
+ "the text is less than completion.min_chars.")
+ elif (self._cmd.cursorPosition() == self._last_cursor_pos and
self._cmd.text() == self._last_text):
log.completion.debug("Ignoring update because there were no "
"changes.")
else:
log.completion.debug("Scheduling completion update.")
- self._timer.start()
+ start_delay = config.val.completion.delay if self._last_text else 0
+ self._timer.start(start_delay)
self._last_cursor_pos = self._cmd.cursorPosition()
self._last_text = self._cmd.text()
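The two knobs this patch reads from `config.val` - `completion.delay` and `completion.min_chars` - implement a debounce plus a minimum-length gate. A framework-free sketch of the same idea is shown below; the class name, default values, and polling interface are invented for the example and are not qutebrowser APIs.
```python
# Toy, Qt-free model of the behaviour added above (illustrative only).
import time

class CompletionDebouncer:
    def __init__(self, delay_ms=50, min_chars=3):  # defaults are made up for the demo
        self.delay = delay_ms / 1000.0
        self.min_chars = min_chars
        self._deadline = None
        self._last_text = ''

    def on_text_changed(self, text):
        # Typing forward but still below the threshold: skip the update entirely.
        if 0 < len(text) < self.min_chars and len(text) > len(self._last_text):
            self._last_text = text
            return
        # First keystroke updates immediately; subsequent ones wait `delay`.
        wait = self.delay if self._last_text else 0
        self._deadline = time.monotonic() + wait
        self._last_text = text

    def due(self):
        """Poll from the event loop; True once the debounce period has elapsed."""
        if self._deadline is not None and time.monotonic() >= self._deadline:
            self._deadline = None
            return True
        return False
```
Holding backspace then schedules roughly one update per debounce window instead of one per keypress, which is the load reduction the issue asks for.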
| {"golden_diff": "diff --git a/qutebrowser/completion/completer.py b/qutebrowser/completion/completer.py\n--- a/qutebrowser/completion/completer.py\n+++ b/qutebrowser/completion/completer.py\n@@ -196,14 +196,25 @@\n \n For performance reasons we don't want to block here, instead we do this\n in the background.\n+\n+ We delay the update only if we've already input some text and ignore\n+ updates if the text is shorter than completion.min_chars (unless we're\n+ hitting backspace in which case updates won't be ignored).\n \"\"\"\n- if (self._cmd.cursorPosition() == self._last_cursor_pos and\n+ _cmd, _sep, rest = self._cmd.text().partition(' ')\n+ input_length = len(rest)\n+ if (0 < input_length < config.val.completion.min_chars and\n+ self._cmd.cursorPosition() > self._last_cursor_pos):\n+ log.completion.debug(\"Ignoring update because the length of \"\n+ \"the text is less than completion.min_chars.\")\n+ elif (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n- self._timer.start()\n+ start_delay = config.val.completion.delay if self._last_text else 0\n+ self._timer.start(start_delay)\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n", "issue": "Add small delay before updating completion\nA small delay before updating the completion might help to reduce load when an user is e.g. holding backspace.\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Completer attached to a CompletionView.\"\"\"\n\nimport attr\nfrom PyQt5.QtCore import pyqtSlot, QObject, QTimer\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdutils, runners\nfrom qutebrowser.utils import log, utils, debug\nfrom qutebrowser.completion.models import miscmodels\n\n\[email protected]\nclass CompletionInfo:\n\n \"\"\"Context passed into all completion functions.\"\"\"\n\n config = attr.ib()\n keyconf = attr.ib()\n\n\nclass Completer(QObject):\n\n \"\"\"Completer which manages completions in a CompletionView.\n\n Attributes:\n _cmd: The statusbar Command object this completer belongs to.\n _ignore_change: Whether to ignore the next completion update.\n _timer: The timer used to trigger the completion update.\n _last_cursor_pos: The old cursor position so we avoid double completion\n updates.\n _last_text: The old command text so we avoid double completion updates.\n _last_completion_func: The completion function used for the last text.\n \"\"\"\n\n def __init__(self, cmd, parent=None):\n super().__init__(parent)\n self._cmd = cmd\n self._ignore_change = False\n self._timer = QTimer()\n self._timer.setSingleShot(True)\n self._timer.setInterval(0)\n self._timer.timeout.connect(self._update_completion)\n self._last_cursor_pos = None\n self._last_text = None\n self._last_completion_func = None\n self._cmd.update_completion.connect(self.schedule_completion_update)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _model(self):\n \"\"\"Convenience method to get the current completion model.\"\"\"\n completion = self.parent()\n return completion.model()\n\n def _get_new_completion(self, before_cursor, under_cursor):\n \"\"\"Get the completion function based on the current command text.\n\n Args:\n before_cursor: The command chunks before the cursor.\n under_cursor: The command chunk under the cursor.\n\n Return:\n A completion model.\n \"\"\"\n if '--' in before_cursor or under_cursor.startswith('-'):\n # cursor on a flag or after an explicit split (--)\n return None\n log.completion.debug(\"Before removing flags: {}\".format(before_cursor))\n before_cursor = [x for x in before_cursor if not x.startswith('-')]\n log.completion.debug(\"After removing flags: {}\".format(before_cursor))\n if not before_cursor:\n # '|' or 'set|'\n log.completion.debug('Starting command completion')\n return miscmodels.command\n try:\n cmd = cmdutils.cmd_dict[before_cursor[0]]\n except KeyError:\n log.completion.debug(\"No completion for unknown command: {}\"\n .format(before_cursor[0]))\n return None\n argpos = len(before_cursor) - 1\n try:\n func = cmd.get_pos_arg_info(argpos).completion\n except IndexError:\n log.completion.debug(\"No completion in position {}\".format(argpos))\n return None\n return func\n\n def _quote(self, s):\n \"\"\"Quote s if it needs quoting for the commandline.\n\n Note we don't use shlex.quote because that quotes a lot of shell\n metachars we don't need to have quoted.\n \"\"\"\n if not s:\n return \"''\"\n elif any(c in s for c in ' \"\\'\\t\\n\\\\'):\n # use single quotes, and put single quotes into double quotes\n # the string $'b is then quoted as '$'\"'\"'b'\n return \"'\" + s.replace(\"'\", \"'\\\"'\\\"'\") + \"'\"\n else:\n return s\n\n def _partition(self):\n \"\"\"Divide the commandline text into chunks around the cursor position.\n\n Return:\n ([parts_before_cursor], 'part_under_cursor', [parts_after_cursor])\n \"\"\"\n text = self._cmd.text()[len(self._cmd.prefix()):]\n 
if not text or not text.strip():\n # Only \":\", empty part under the cursor with nothing before/after\n return [], '', []\n parser = runners.CommandParser()\n result = parser.parse(text, fallback=True, keep=True)\n # pylint: disable=not-an-iterable\n parts = [x for x in result.cmdline if x]\n # pylint: enable=not-an-iterable\n pos = self._cmd.cursorPosition() - len(self._cmd.prefix())\n pos = min(pos, len(text)) # Qt treats 2-byte UTF-16 chars as 2 chars\n log.completion.debug('partitioning {} around position {}'.format(parts,\n pos))\n for i, part in enumerate(parts):\n pos -= len(part)\n if pos <= 0:\n if part[pos-1:pos+1].isspace():\n # cursor is in a space between two existing words\n parts.insert(i, '')\n prefix = [x.strip() for x in parts[:i]]\n center = parts[i].strip()\n # strip trailing whitepsace included as a separate token\n postfix = [x.strip() for x in parts[i+1:] if not x.isspace()]\n log.completion.debug(\n \"partitioned: {} '{}' {}\".format(prefix, center, postfix))\n return prefix, center, postfix\n\n # We should always return above\n assert False, parts\n\n @pyqtSlot(str)\n def on_selection_changed(self, text):\n \"\"\"Change the completed part if a new item was selected.\n\n Called from the views selectionChanged method.\n\n Args:\n text: Newly selected text.\n \"\"\"\n if text is None:\n return\n before, center, after = self._partition()\n log.completion.debug(\"Changing {} to '{}'\".format(center, text))\n try:\n maxsplit = cmdutils.cmd_dict[before[0]].maxsplit\n except (KeyError, IndexError):\n maxsplit = None\n if maxsplit is None:\n text = self._quote(text)\n model = self._model()\n if model.count() == 1 and config.val.completion.quick:\n # If we only have one item, we want to apply it immediately\n # and go on to the next part.\n self._change_completed_part(text, before, after, immediate=True)\n if maxsplit is not None and maxsplit < len(before):\n # If we are quick-completing the part after maxsplit, don't\n # keep offering completions (see issue #1519)\n self._ignore_change = True\n else:\n log.completion.debug(\"Will ignore next completion update.\")\n self._ignore_change = True\n self._change_completed_part(text, before, after)\n\n @pyqtSlot()\n def schedule_completion_update(self):\n \"\"\"Schedule updating/enabling completion.\n\n For performance reasons we don't want to block here, instead we do this\n in the background.\n \"\"\"\n if (self._cmd.cursorPosition() == self._last_cursor_pos and\n self._cmd.text() == self._last_text):\n log.completion.debug(\"Ignoring update because there were no \"\n \"changes.\")\n else:\n log.completion.debug(\"Scheduling completion update.\")\n self._timer.start()\n self._last_cursor_pos = self._cmd.cursorPosition()\n self._last_text = self._cmd.text()\n\n @pyqtSlot()\n def _update_completion(self):\n \"\"\"Check if completions are available and activate them.\"\"\"\n if self._ignore_change:\n log.completion.debug(\"Ignoring completion update because \"\n \"ignore_change is True.\")\n self._ignore_change = False\n return\n\n completion = self.parent()\n\n if self._cmd.prefix() != ':':\n # This is a search or gibberish, so we don't need to complete\n # anything (yet)\n # FIXME complete searches\n # https://github.com/qutebrowser/qutebrowser/issues/32\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n before_cursor, pattern, after_cursor = self._partition()\n\n log.completion.debug(\"Updating completion: {} {} {}\".format(\n before_cursor, pattern, after_cursor))\n\n pattern = 
pattern.strip(\"'\\\"\")\n func = self._get_new_completion(before_cursor, pattern)\n\n if func is None:\n log.completion.debug('Clearing completion')\n completion.set_model(None)\n self._last_completion_func = None\n return\n\n if func != self._last_completion_func:\n self._last_completion_func = func\n args = (x for x in before_cursor[1:] if not x.startswith('-'))\n with debug.log_time(log.completion,\n 'Starting {} completion'.format(func.__name__)):\n info = CompletionInfo(config=config.instance,\n keyconf=config.key_instance)\n model = func(*args, info=info)\n with debug.log_time(log.completion, 'Set completion model'):\n completion.set_model(model)\n\n completion.set_pattern(pattern)\n\n def _change_completed_part(self, newtext, before, after, immediate=False):\n \"\"\"Change the part we're currently completing in the commandline.\n\n Args:\n text: The text to set (string) for the token under the cursor.\n before: Commandline tokens before the token under the cursor.\n after: Commandline tokens after the token under the cursor.\n immediate: True if the text should be completed immediately\n including a trailing space and we shouldn't continue\n completing the current item.\n \"\"\"\n text = self._cmd.prefix() + ' '.join(before + [newtext])\n pos = len(text) + (1 if immediate else 0)\n if after:\n text += ' ' + ' '.join(after)\n elif immediate:\n # pad with a space if quick-completing the last entry\n text += ' '\n log.completion.debug(\"setting text = '{}', pos = {}\".format(text, pos))\n self._cmd.setText(text)\n self._cmd.setCursorPosition(pos)\n self._cmd.setFocus()\n self._cmd.show_cmd.emit()\n", "path": "qutebrowser/completion/completer.py"}]} | 3,664 | 360 |
gh_patches_debug_6937 | rasdani/github-patches | git_diff | ivy-llc__ivy-22098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reshape_
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 with_supported_device_and_dtypes,
10 )
11
12
13 @to_ivy_arrays_and_back
14 def reshape(x, shape):
15 return ivy.reshape(x, shape)
16
17
18 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
19 @to_ivy_arrays_and_back
20 def abs(x, name=None):
21 return ivy.abs(x)
22
23
24 absolute = abs
25
26
27 @to_ivy_arrays_and_back
28 def stack(x, axis=0, name=None):
29 return ivy.stack(x, axis=axis)
30
31
32 @with_unsupported_dtypes({"2.5.1 and below": ("int8", "int16")}, "paddle")
33 @to_ivy_arrays_and_back
34 def concat(x, axis, name=None):
35 return ivy.concat(x, axis=axis)
36
37
38 @with_unsupported_dtypes(
39 {"2.5.1 and below": ("int8", "uint8", "int16", "float16")},
40 "paddle",
41 )
42 @to_ivy_arrays_and_back
43 def tile(x, repeat_times, name=None):
44 return ivy.tile(x, repeats=repeat_times)
45
46
47 @with_unsupported_dtypes(
48 {"2.5.1 and below": ("int8", "uint8", "int16", "float16")},
49 "paddle",
50 )
51 @to_ivy_arrays_and_back
52 def flip(x, axis, name=None):
53 return ivy.flip(x, axis=axis)
54
55
56 @with_unsupported_dtypes(
57 {"2.5.1 and below": ("int16", "complex64", "complex128")},
58 "paddle",
59 )
60 @to_ivy_arrays_and_back
61 def split(x, num_or_sections, axis=0, name=None):
62 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
63
64
65 @with_unsupported_dtypes(
66 {"2.5.1 and below": ("float16", "bfloat16", "int8", "int16")},
67 "paddle",
68 )
69 @to_ivy_arrays_and_back
70 def squeeze(x, axis=None, name=None):
71 return ivy.squeeze(x, axis=axis)
72
73
74 @with_supported_dtypes(
75 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")},
76 "paddle",
77 )
78 @to_ivy_arrays_and_back
79 def expand(x, shape, name=None):
80 return ivy.expand(x, shape)
81
82
83 @with_supported_dtypes(
84 {
85 "2.5.1 and below": (
86 "bool",
87 "float16",
88 "float32",
89 "float64",
90 "int32",
91 "int64",
92 "uint8",
93 )
94 },
95 "paddle",
96 )
97 @to_ivy_arrays_and_back
98 def cast(x, dtype):
99 return ivy.astype(x, dtype)
100
101
102 @with_supported_dtypes(
103 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")},
104 "paddle",
105 )
106 @to_ivy_arrays_and_back
107 def broadcast_to(x, shape, name=None):
108 return ivy.broadcast_to(x, shape)
109
110
111 @with_supported_dtypes(
112 {"2.5.1 and below": ("bool", "float32", "float64", "int32", "int64")},
113 "paddle",
114 )
115 @to_ivy_arrays_and_back
116 def gather(params, indices, axis=-1, batch_dims=0, name=None):
117 return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)
118
119
120 @with_supported_dtypes(
121 {
122 "2.5.0 and below": (
123 "float32",
124 "float64",
125 "int32",
126 "int64",
127 "complex64",
128 "complex128",
129 )
130 },
131 "paddle",
132 )
133 @to_ivy_arrays_and_back
134 def roll(x, shifts, axis=None, name=None):
135 return ivy.roll(x, shifts, axis=axis)
136
137
138 @with_supported_dtypes(
139 {
140 "2.5.1 and below": (
141 "float32",
142 "float64",
143 "int32",
144 "int64",
145 )
146 },
147 "paddle",
148 )
149 @to_ivy_arrays_and_back
150 def take_along_axis(arr, indices, axis):
151 return ivy.take_along_axis(arr, indices, axis)
152
153
154 @with_supported_device_and_dtypes(
155 {
156 "2.5.1 and above": {
157 "cpu": (
158 "bool",
159 "int32",
160 "int64",
161 "float32",
162 "float64",
163 ),
164 "gpu": ("float16",),
165 },
166 },
167 "paddle",
168 )
169 @to_ivy_arrays_and_back
170 def rot90(x, k=1, axes=(0, 1), name=None):
171 return ivy.rot90(x, k=k, axes=axes)
172
[end of ivy/functional/frontends/paddle/tensor/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -15,6 +15,17 @@
return ivy.reshape(x, shape)
+@with_unsupported_dtypes(
+ {"2.5.1 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def reshape_(x, shape):
+ ret = ivy.reshape(x, shape)
+ ivy.inplace_update(x, ret)
+ return x
+
+
@with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
@to_ivy_arrays_and_back
def abs(x, name=None):
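A rough usage sketch for the new in-place frontend follows. The import path matches the file being patched; the backend choice and the array contents are assumptions made for the example, and the in-place semantics go through `ivy.inplace_update` as shown in the diff.
```python
# Hedged usage sketch (output not verified): out-of-place vs. in-place reshape.
import ivy
from ivy.functional.frontends.paddle.tensor.manipulation import reshape, reshape_

ivy.set_backend("numpy")  # assumes the NumPy backend is installed

x = ivy.array([[1.0, 2.0], [3.0, 4.0]])
y = reshape(x, [4])    # returns a reshaped result, x is left untouched
z = reshape_(x, [4])   # trailing underscore: x is updated in place and returned
```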
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -15,6 +15,17 @@\n return ivy.reshape(x, shape)\n \n \n+@with_unsupported_dtypes(\n+ {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def reshape_(x, shape):\n+ ret = ivy.reshape(x, shape)\n+ ivy.inplace_update(x, ret)\n+ return x\n+\n+\n @with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def abs(x, name=None):\n", "issue": "reshape_\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": 
(\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]} | 2,143 | 239 |
gh_patches_debug_23414 | rasdani/github-patches | git_diff | gammapy__gammapy-2558 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log scale and grids
This issue is triggered by https://github.com/gammapy/gammapy/issues/2270#issue-464031097 - there's a test failure with Numpy dev - the issue is that we are making an `nbins` float here:
https://github.com/gammapy/gammapy/blob/7a19c4b3b3e82831c6c683db9799a1d803cf928f/gammapy/utils/energy.py#L34
@adonath - Could you please fix this? I'm not sure if the proper fix is to round up or down to the nearest int.
There's other options as well, e.g. we could just remove the `per_decade` argument or that helper function completely and use this:
https://github.com/gammapy/gammapy/blob/76ab409e6bee09436127ce67e11de1ad5b2d7bb0/gammapy/maps/geom.py#L479
Or introduce an equivalent of `numpy.arange` - Numpy offered a separate function for the case where the caller wants a stepsize instead of a given number of points - maybe we should do that as well to be symmetric, and to keep support for setting up grids of a desired step size easily.
Finally, there's the option to attach the grid-making functionality to the existing scale classes here and to use those throughout Gammapy:
https://github.com/gammapy/gammapy/blob/5a24919d91a9aa601f7a1ee3d677cf4bebfc6b3b/gammapy/utils/interpolation.py#L138
I think https://observablehq.com/@d3/scale-ticks and also matplotlib have the feature to make grids or ticks attached to the scales. Here in Astropy this wasn't needed/added so far http://docs.astropy.org/en/stable/visualization/normalization.html
In principle having energy-specific scale and grid functions for log isn't needed or good design, on the other hand it's the only case (currently, but later?) where we need a log scale, so just fixing up the existing helper function is not too bad.
</issue>
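The failing pattern boils down to handing `numpy.logspace` a non-integer number of samples, which newer NumPy versions reject. A minimal illustration with made-up numbers, using round-up as one possible resolution (the issue leaves the rounding direction open):
```python
import numpy as np

# Hypothetical energy range and binning; only the float `nbins` matters here.
emin, emax, bins_per_decade = 0.1, 30.0, 7        # e.g. TeV
ndecades = np.log10(emax) - np.log10(emin)         # ~2.48 decades
nbins = ndecades * bins_per_decade                 # ~17.3 -> float, rejected by NumPy dev
nbins = int(np.ceil(nbins))                        # one option: round up to 18 bins

edges = np.logspace(np.log10(emin), np.log10(emax), nbins + 1)
print(len(edges))                                  # 19 edges spanning emin..emax
```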
<code>
[start of gammapy/utils/nddata.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 """Utility functions and classes for n-dimensional data and axes."""
3 import numpy as np
4 from astropy.units import Quantity
5 from .array import array_stats_str
6 from .interpolation import ScaledRegularGridInterpolator
7
8 __all__ = ["NDDataArray", "sqrt_space"]
9
10
11 class NDDataArray:
12 """ND Data Array Base class
13
14 Parameters
15 ----------
16 axes : list
17 List of `~gammapy.utils.nddata.DataAxis`
18 data : `~astropy.units.Quantity`
19 Data
20 meta : dict
21 Meta info
22 interp_kwargs : dict
23 TODO
24 """
25
26 default_interp_kwargs = dict(bounds_error=False, values_scale="lin")
27 """Default interpolation kwargs used to initialize the
28 `scipy.interpolate.RegularGridInterpolator`. The interpolation behaviour
29 of an individual axis ('log', 'linear') can be passed to the axis on
30 initialization."""
31
32 def __init__(self, axes, data=None, meta=None, interp_kwargs=None):
33 self._axes = axes
34 if data is not None:
35 self.data = data
36 self.meta = meta or {}
37 self.interp_kwargs = interp_kwargs or self.default_interp_kwargs
38
39 self._regular_grid_interp = None
40
41 def __str__(self):
42 ss = "NDDataArray summary info\n"
43 for axis in self.axes:
44 ss += str(axis)
45 ss += array_stats_str(self.data, "Data")
46 return ss
47
48 @property
49 def axes(self):
50 """Array holding the axes in correct order"""
51 return self._axes
52
53 def axis(self, name):
54 """Return axis by name"""
55 try:
56 idx = [_.name for _ in self.axes].index(name)
57 except ValueError:
58 raise ValueError(f"Axis {name} not found")
59 return self.axes[idx]
60
61 @property
62 def data(self):
63 """Array holding the n-dimensional data."""
64 return self._data
65
66 @data.setter
67 def data(self, data):
68 """Set data.
69
70 Some sanity checks are performed to avoid an invalid array.
71 Also, the interpolator is set to None to avoid unwanted behaviour.
72
73 Parameters
74 ----------
75 data : `~astropy.units.Quantity`, array-like
76 Data array
77 """
78 data = Quantity(data)
79 dimension = len(data.shape)
80 if dimension != self.dim:
81 raise ValueError(
82 "Overall dimensions to not match. "
83 "Data: {}, Hist: {}".format(dimension, self.dim)
84 )
85
86 for dim in np.arange(self.dim):
87 axis = self.axes[dim]
88 if axis.nbin != data.shape[dim]:
89 msg = "Data shape does not match in dimension {d}\n"
90 msg += "Axis {n} : {sa}, Data {sd}"
91 raise ValueError(
92 msg.format(d=dim, n=axis.name, sa=axis.nbin, sd=data.shape[dim])
93 )
94 self._regular_grid_interp = None
95 self._data = data
96
97 @property
98 def dim(self):
99 """Dimension (number of axes)"""
100 return len(self.axes)
101
102 def evaluate(self, method=None, **kwargs):
103 """Evaluate NDData Array
104
105 This function provides a uniform interface to several interpolators.
106 The evaluation nodes are given as ``kwargs``.
107
108 Currently available:
109 `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest
110
111 Parameters
112 ----------
113 method : str {'linear', 'nearest'}, optional
114 Interpolation method
115 kwargs : dict
116 Keys are the axis names, Values the evaluation points
117
118 Returns
119 -------
120 array : `~astropy.units.Quantity`
121 Interpolated values, axis order is the same as for the NDData array
122 """
123 values = []
124 for idx, axis in enumerate(self.axes):
125 # Extract values for each axis, default: nodes
126 shape = [1] * len(self.axes)
127 shape[idx] = -1
128 default = axis.center.reshape(tuple(shape))
129 temp = Quantity(kwargs.pop(axis.name, default))
130 values.append(np.atleast_1d(temp))
131
132 # This is to catch e.g. typos in axis names
133 if kwargs != {}:
134 raise ValueError(f"Input given for unknown axis: {kwargs}")
135
136 if self._regular_grid_interp is None:
137 self._add_regular_grid_interp()
138
139 return self._regular_grid_interp(values, method=method, **kwargs)
140
141 def _add_regular_grid_interp(self, interp_kwargs=None):
142 """Add `~scipy.interpolate.RegularGridInterpolator`
143
144 http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html
145
146 Parameters
147 ----------
148 interp_kwargs : dict, optional
149 Interpolation kwargs
150 """
151 if interp_kwargs is None:
152 interp_kwargs = self.interp_kwargs
153
154 points = [a.center for a in self.axes]
155 points_scale = [a.interp for a in self.axes]
156 self._regular_grid_interp = ScaledRegularGridInterpolator(
157 points, self.data, points_scale=points_scale, **interp_kwargs
158 )
159
160
161 def sqrt_space(start, stop, num):
162 """Return numbers spaced evenly on a square root scale.
163
164 This function is similar to `numpy.linspace` and `numpy.logspace`.
165
166 Parameters
167 ----------
168 start : float
169 start is the starting value of the sequence
170 stop : float
171 stop is the final value of the sequence
172 num : int
173 Number of samples to generate.
174
175 Returns
176 -------
177 samples : `~numpy.ndarray`
178 1D array with a square root scale
179
180 Examples
181 --------
182 >>> from gammapy.utils.nddata import sqrt_space
183 >>> sqrt_space(0, 2, 5)
184 array([0. , 1. , 1.41421356, 1.73205081, 2. ])
185 """
186 return np.sqrt(np.linspace(start ** 2, stop ** 2, num))
187
[end of gammapy/utils/nddata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/utils/nddata.py b/gammapy/utils/nddata.py
--- a/gammapy/utils/nddata.py
+++ b/gammapy/utils/nddata.py
@@ -5,7 +5,7 @@
from .array import array_stats_str
from .interpolation import ScaledRegularGridInterpolator
-__all__ = ["NDDataArray", "sqrt_space"]
+__all__ = ["NDDataArray"]
class NDDataArray:
@@ -156,31 +156,3 @@
self._regular_grid_interp = ScaledRegularGridInterpolator(
points, self.data, points_scale=points_scale, **interp_kwargs
)
-
-
-def sqrt_space(start, stop, num):
- """Return numbers spaced evenly on a square root scale.
-
- This function is similar to `numpy.linspace` and `numpy.logspace`.
-
- Parameters
- ----------
- start : float
- start is the starting value of the sequence
- stop : float
- stop is the final value of the sequence
- num : int
- Number of samples to generate.
-
- Returns
- -------
- samples : `~numpy.ndarray`
- 1D array with a square root scale
-
- Examples
- --------
- >>> from gammapy.utils.nddata import sqrt_space
- >>> sqrt_space(0, 2, 5)
- array([0. , 1. , 1.41421356, 1.73205081, 2. ])
- """
- return np.sqrt(np.linspace(start ** 2, stop ** 2, num))
| {"golden_diff": "diff --git a/gammapy/utils/nddata.py b/gammapy/utils/nddata.py\n--- a/gammapy/utils/nddata.py\n+++ b/gammapy/utils/nddata.py\n@@ -5,7 +5,7 @@\n from .array import array_stats_str\n from .interpolation import ScaledRegularGridInterpolator\n \n-__all__ = [\"NDDataArray\", \"sqrt_space\"]\n+__all__ = [\"NDDataArray\"]\n \n \n class NDDataArray:\n@@ -156,31 +156,3 @@\n self._regular_grid_interp = ScaledRegularGridInterpolator(\n points, self.data, points_scale=points_scale, **interp_kwargs\n )\n-\n-\n-def sqrt_space(start, stop, num):\n- \"\"\"Return numbers spaced evenly on a square root scale.\n-\n- This function is similar to `numpy.linspace` and `numpy.logspace`.\n-\n- Parameters\n- ----------\n- start : float\n- start is the starting value of the sequence\n- stop : float\n- stop is the final value of the sequence\n- num : int\n- Number of samples to generate.\n-\n- Returns\n- -------\n- samples : `~numpy.ndarray`\n- 1D array with a square root scale\n-\n- Examples\n- --------\n- >>> from gammapy.utils.nddata import sqrt_space\n- >>> sqrt_space(0, 2, 5)\n- array([0. , 1. , 1.41421356, 1.73205081, 2. ])\n- \"\"\"\n- return np.sqrt(np.linspace(start ** 2, stop ** 2, num))\n", "issue": "Log scale and grids\nThis issue is triggered by https://github.com/gammapy/gammapy/issues/2270#issue-464031097 - there's a test fail with Numpy dev - the issue is that we are making an `nbins` float here:\r\n\r\nhttps://github.com/gammapy/gammapy/blob/7a19c4b3b3e82831c6c683db9799a1d803cf928f/gammapy/utils/energy.py#L34\r\n\r\n@adonath - Could you please fix this? I'm not sure if the proper fix is to round up or down to the nearest int.\r\n\r\nThere's other options as well, e.g. we could just remove the `per_decade` argument or that helper function completely and use this:\r\nhttps://github.com/gammapy/gammapy/blob/76ab409e6bee09436127ce67e11de1ad5b2d7bb0/gammapy/maps/geom.py#L479\r\n\r\nOr introduce an equivalent of `numpy.arange` - Numpy offered a separate function for the case where the caller wants a stepsize instead of a given number of points - maybe we should do that as well to be symmetric, and to keep support for setting up grids of a desired step size easily.\r\n\r\nFinally, there's the option to attache the grid-making functionality to the existing scale classes here and to use those throughout Gammapy:\r\nhttps://github.com/gammapy/gammapy/blob/5a24919d91a9aa601f7a1ee3d677cf4bebfc6b3b/gammapy/utils/interpolation.py#L138\r\n\r\nI think https://observablehq.com/@d3/scale-ticks and also matplotlib have the feature to make grids or ticks attached to the scales. Here in Astropy this wasn't needed/added so far http://docs.astropy.org/en/stable/visualization/normalization.html\r\n\r\nIn principle having energy-specific scale and grid functions for log isn't needed or good design, on the other hand it's the only case (currently, but later?) 
where we need a log scale, so just fixing up the existing helper function is not too bad.\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Utility functions and classes for n-dimensional data and axes.\"\"\"\nimport numpy as np\nfrom astropy.units import Quantity\nfrom .array import array_stats_str\nfrom .interpolation import ScaledRegularGridInterpolator\n\n__all__ = [\"NDDataArray\", \"sqrt_space\"]\n\n\nclass NDDataArray:\n \"\"\"ND Data Array Base class\n\n Parameters\n ----------\n axes : list\n List of `~gammapy.utils.nddata.DataAxis`\n data : `~astropy.units.Quantity`\n Data\n meta : dict\n Meta info\n interp_kwargs : dict\n TODO\n \"\"\"\n\n default_interp_kwargs = dict(bounds_error=False, values_scale=\"lin\")\n \"\"\"Default interpolation kwargs used to initialize the\n `scipy.interpolate.RegularGridInterpolator`. The interpolation behaviour\n of an individual axis ('log', 'linear') can be passed to the axis on\n initialization.\"\"\"\n\n def __init__(self, axes, data=None, meta=None, interp_kwargs=None):\n self._axes = axes\n if data is not None:\n self.data = data\n self.meta = meta or {}\n self.interp_kwargs = interp_kwargs or self.default_interp_kwargs\n\n self._regular_grid_interp = None\n\n def __str__(self):\n ss = \"NDDataArray summary info\\n\"\n for axis in self.axes:\n ss += str(axis)\n ss += array_stats_str(self.data, \"Data\")\n return ss\n\n @property\n def axes(self):\n \"\"\"Array holding the axes in correct order\"\"\"\n return self._axes\n\n def axis(self, name):\n \"\"\"Return axis by name\"\"\"\n try:\n idx = [_.name for _ in self.axes].index(name)\n except ValueError:\n raise ValueError(f\"Axis {name} not found\")\n return self.axes[idx]\n\n @property\n def data(self):\n \"\"\"Array holding the n-dimensional data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n \"\"\"Set data.\n\n Some sanity checks are performed to avoid an invalid array.\n Also, the interpolator is set to None to avoid unwanted behaviour.\n\n Parameters\n ----------\n data : `~astropy.units.Quantity`, array-like\n Data array\n \"\"\"\n data = Quantity(data)\n dimension = len(data.shape)\n if dimension != self.dim:\n raise ValueError(\n \"Overall dimensions to not match. 
\"\n \"Data: {}, Hist: {}\".format(dimension, self.dim)\n )\n\n for dim in np.arange(self.dim):\n axis = self.axes[dim]\n if axis.nbin != data.shape[dim]:\n msg = \"Data shape does not match in dimension {d}\\n\"\n msg += \"Axis {n} : {sa}, Data {sd}\"\n raise ValueError(\n msg.format(d=dim, n=axis.name, sa=axis.nbin, sd=data.shape[dim])\n )\n self._regular_grid_interp = None\n self._data = data\n\n @property\n def dim(self):\n \"\"\"Dimension (number of axes)\"\"\"\n return len(self.axes)\n\n def evaluate(self, method=None, **kwargs):\n \"\"\"Evaluate NDData Array\n\n This function provides a uniform interface to several interpolators.\n The evaluation nodes are given as ``kwargs``.\n\n Currently available:\n `~scipy.interpolate.RegularGridInterpolator`, methods: linear, nearest\n\n Parameters\n ----------\n method : str {'linear', 'nearest'}, optional\n Interpolation method\n kwargs : dict\n Keys are the axis names, Values the evaluation points\n\n Returns\n -------\n array : `~astropy.units.Quantity`\n Interpolated values, axis order is the same as for the NDData array\n \"\"\"\n values = []\n for idx, axis in enumerate(self.axes):\n # Extract values for each axis, default: nodes\n shape = [1] * len(self.axes)\n shape[idx] = -1\n default = axis.center.reshape(tuple(shape))\n temp = Quantity(kwargs.pop(axis.name, default))\n values.append(np.atleast_1d(temp))\n\n # This is to catch e.g. typos in axis names\n if kwargs != {}:\n raise ValueError(f\"Input given for unknown axis: {kwargs}\")\n\n if self._regular_grid_interp is None:\n self._add_regular_grid_interp()\n\n return self._regular_grid_interp(values, method=method, **kwargs)\n\n def _add_regular_grid_interp(self, interp_kwargs=None):\n \"\"\"Add `~scipy.interpolate.RegularGridInterpolator`\n\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html\n\n Parameters\n ----------\n interp_kwargs : dict, optional\n Interpolation kwargs\n \"\"\"\n if interp_kwargs is None:\n interp_kwargs = self.interp_kwargs\n\n points = [a.center for a in self.axes]\n points_scale = [a.interp for a in self.axes]\n self._regular_grid_interp = ScaledRegularGridInterpolator(\n points, self.data, points_scale=points_scale, **interp_kwargs\n )\n\n\ndef sqrt_space(start, stop, num):\n \"\"\"Return numbers spaced evenly on a square root scale.\n\n This function is similar to `numpy.linspace` and `numpy.logspace`.\n\n Parameters\n ----------\n start : float\n start is the starting value of the sequence\n stop : float\n stop is the final value of the sequence\n num : int\n Number of samples to generate.\n\n Returns\n -------\n samples : `~numpy.ndarray`\n 1D array with a square root scale\n\n Examples\n --------\n >>> from gammapy.utils.nddata import sqrt_space\n >>> sqrt_space(0, 2, 5)\n array([0. , 1. , 1.41421356, 1.73205081, 2. ])\n \"\"\"\n return np.sqrt(np.linspace(start ** 2, stop ** 2, num))\n", "path": "gammapy/utils/nddata.py"}]} | 2,836 | 380 |
gh_patches_debug_3735 | rasdani/github-patches | git_diff | spacetelescope__jwql-167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
logging_functions should write logs from dev & test VMs to dev/
Currently `logging_functions.py` will write new log files to the `dev/` directory if the script being executed is not being executed from the admin account. This should be extended to write to `dev/` when the script is run from personal accounts from the dev or test VMs.
In other words, all logs should be written to `dev/` unless they are being run from the admin account on one of the production VMs.
</issue>
<code>
[start of jwql/utils/logging_functions.py]
1
2 """ Logging functions for the ``jwql`` automation platform.
3
4 This module provides decorators to log the execution of modules. Log
5 files are written to the ``logs/`` directory in the ``jwql`` central
6 storage area, named by module name and timestamp, e.g.
7 ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``
8
9
10 Authors
11 -------
12
13 - Catherine Martlin 2018
14 - Alex Viana, 2013 (WFC3 QL Version)
15
16 Use
17 ---
18
19 To log the execution of a module, use:
20 ::
21
22 import os
23 import logging
24
25 from jwql.logging.logging_functions import configure_logging
26 from jwql.logging.logging_functions import log_info
27 from jwql.logging.logging_functions import log_fail
28
29 @log_info
30 @log_fail
31 def my_main_function():
32 pass
33
34 if __name__ == '__main__':
35
36 module = os.path.basename(__file__).replace('.py', '')
37 configure_logging(module)
38
39 my_main_function()
40
41 Dependencies
42 ------------
43
44 The user must have a configuration file named ``config.json``
45 placed in the ``utils`` directory.
46
47
48 References
49 ----------
50 This code is adopted and updated from python routine
51 ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3
52 Quicklook automation platform.
53 """
54
55 import datetime
56 import getpass
57 import importlib
58 import logging
59 import os
60 import pwd
61 import socket
62 import sys
63 import time
64 import traceback
65
66 from functools import wraps
67
68 from jwql.utils.permissions import set_permissions
69 from jwql.utils.utils import get_config, ensure_dir_exists
70
71 LOG_FILE_LOC = ''
72 PRODUCTION_BOOL = ''
73
74
75 def configure_logging(module, production_mode=True, path='./'):
76 """Configure the log file with a standard logging format.
77
78 Parameters
79 ----------
80 module : str
81 The name of the module being logged.
82 production_mode : bool
83 Whether or not the output should be written to the production
84 environement.
85 path : str
86 Where to write the log if user-supplied path; default to working dir.
87 """
88
89 # Determine log file location
90 if production_mode:
91 log_file = make_log_file(module)
92 else:
93 log_file = make_log_file(module, production_mode=False, path=path)
94 global LOG_FILE_LOC
95 global PRODUCTION_BOOL
96 LOG_FILE_LOC = log_file
97 PRODUCTION_BOOL = production_mode
98
99 # Create the log file and set the permissions
100 logging.basicConfig(filename=log_file,
101 format='%(asctime)s %(levelname)s: %(message)s',
102 datefmt='%m/%d/%Y %H:%M:%S %p',
103 level=logging.INFO)
104 set_permissions(log_file)
105
106
107 def make_log_file(module, production_mode=True, path='./'):
108 """Create the log file name based on the module name.
109
110 The name of the ``log_file`` is a combination of the name of the
111 module being logged and the current datetime.
112
113 Parameters
114 ----------
115 module : str
116 The name of the module being logged.
117 production_mode : bool
118 Whether or not the output should be written to the production
119 environment.
120 path : str
121 Where to write the log if user-supplied path; default to
122 working dir.
123
124 Returns
125 -------
126 log_file : str
127 The full path to where the log file will be written to.
128 """
129
130 timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
131 filename = '{0}_{1}.log'.format(module, timestamp)
132 user = pwd.getpwuid(os.getuid()).pw_name
133
134 settings = get_config()
135 admin_account = settings['admin_account']
136 log_path = settings['log_dir']
137
138 exempt_modules = []
139 if user != admin_account and module not in exempt_modules and production_mode:
140 module = os.path.join('dev', module)
141
142 if production_mode:
143 log_file = os.path.join(log_path, module, filename)
144 else:
145 log_file = os.path.join(path, filename)
146
147 ensure_dir_exists(os.path.dirname(log_file))
148
149 return log_file
150
151
152 def log_info(func):
153 """Decorator to log useful system information.
154
155 This function can be used as a decorator to log user environment
156 and system information. Future packages we want to track can be
157 added or removed as necessary.
158
159 Parameters
160 ----------
161 func : func
162 The function to decorate.
163
164 Returns
165 -------
166 wrapped : func
167 The wrapped function.
168 """
169
170 @wraps(func)
171 def wrapped(*a, **kw):
172
173 # Log environment information
174 logging.info('User: ' + getpass.getuser())
175 logging.info('System: ' + socket.gethostname())
176 logging.info('Python Version: ' + sys.version.replace('\n', ''))
177 logging.info('Python Executable Path: ' + sys.executable)
178
179 # Read in setup.py file to build list of required modules
180 settings = get_config()
181 setup_file_name = settings['setup_file']
182 with open(setup_file_name) as setup:
183 for line in setup:
184 if line[0:8] == "REQUIRES":
185 module_required = line[12:-2]
186 module_list = module_required.split(',')
187
188 # Clean up the module list
189 module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list]
190 module_list = [module.split('=')[0] for module in module_list]
191
192 # Log common module version information
193 for module in module_list:
194 try:
195 mod = importlib.import_module(module)
196 logging.info(module + ' Version: ' + mod.__version__)
197 logging.info(module + ' Path: ' + mod.__path__[0])
198 except ImportError as err:
199 logging.warning(err)
200
201 # Call the function and time it
202 t1_cpu = time.clock()
203 t1_time = time.time()
204 func(*a, **kw)
205 t2_cpu = time.clock()
206 t2_time = time.time()
207
208 # Log execution time
209 hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
210 minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
211 hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
212 minutes_time, seconds_time = divmod(remainder_time, 60)
213 logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))
214 logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))
215
216 return wrapped
217
218
219 def log_fail(func):
220 """Decorator to log crashes in the decorated code.
221
222 Parameters
223 ----------
224 func : func
225 The function to decorate.
226
227 Returns
228 -------
229 wrapped : func
230 The wrapped function.
231 """
232
233 @wraps(func)
234 def wrapped(*a, **kw):
235
236 try:
237
238 # Run the function
239 func(*a, **kw)
240 logging.info('Completed Successfully')
241
242 except Exception:
243 logging.critical(traceback.format_exc())
244 logging.critical('CRASHED')
245
246 return wrapped
247
[end of jwql/utils/logging_functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py
--- a/jwql/utils/logging_functions.py
+++ b/jwql/utils/logging_functions.py
@@ -135,8 +135,7 @@
admin_account = settings['admin_account']
log_path = settings['log_dir']
- exempt_modules = []
- if user != admin_account and module not in exempt_modules and production_mode:
+ if user != admin_account or not production_mode:
module = os.path.join('dev', module)
if production_mode:
| {"golden_diff": "diff --git a/jwql/utils/logging_functions.py b/jwql/utils/logging_functions.py\n--- a/jwql/utils/logging_functions.py\n+++ b/jwql/utils/logging_functions.py\n@@ -135,8 +135,7 @@\n admin_account = settings['admin_account']\n log_path = settings['log_dir']\n \n- exempt_modules = []\n- if user != admin_account and module not in exempt_modules and production_mode:\n+ if user != admin_account or not production_mode:\n module = os.path.join('dev', module)\n \n if production_mode:\n", "issue": "logging_functions should write logs from dev & test VMs to dev/\nCurrently `logging_functions.py` will write new log files to the `dev/` directory if the script being executed is not being executed from the admin account. This should be extended to write to `dev/` when the script is run from personal accounts from the dev or test VMs. \r\n\r\nIn other words, all logs should be written to `dev/` unless they are being run from the admin account on one of the production VMs. \n", "before_files": [{"content": "\n\"\"\" Logging functions for the ``jwql`` automation platform.\n\nThis module provides decorators to log the execution of modules. Log\nfiles are written to the ``logs/`` directory in the ``jwql`` central\nstorage area, named by module name and timestamp, e.g.\n``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``\n\n\nAuthors\n-------\n\n - Catherine Martlin 2018\n - Alex Viana, 2013 (WFC3 QL Version)\n\nUse\n---\n\n To log the execution of a module, use:\n ::\n\n import os\n import logging\n\n from jwql.logging.logging_functions import configure_logging\n from jwql.logging.logging_functions import log_info\n from jwql.logging.logging_functions import log_fail\n\n @log_info\n @log_fail\n def my_main_function():\n pass\n\n if __name__ == '__main__':\n\n module = os.path.basename(__file__).replace('.py', '')\n configure_logging(module)\n\n my_main_function()\n\nDependencies\n------------\n\n The user must have a configuration file named ``config.json``\n placed in the ``utils`` directory.\n\n\nReferences\n----------\n This code is adopted and updated from python routine\n ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3\n Quicklook automation platform.\n\"\"\"\n\nimport datetime\nimport getpass\nimport importlib\nimport logging\nimport os\nimport pwd\nimport socket\nimport sys\nimport time\nimport traceback\n\nfrom functools import wraps\n\nfrom jwql.utils.permissions import set_permissions\nfrom jwql.utils.utils import get_config, ensure_dir_exists\n\nLOG_FILE_LOC = ''\nPRODUCTION_BOOL = ''\n\n\ndef configure_logging(module, production_mode=True, path='./'):\n \"\"\"Configure the log file with a standard logging format.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environement.\n path : str\n Where to write the log if user-supplied path; default to working dir.\n \"\"\"\n\n # Determine log file location\n if production_mode:\n log_file = make_log_file(module)\n else:\n log_file = make_log_file(module, production_mode=False, path=path)\n global LOG_FILE_LOC\n global PRODUCTION_BOOL\n LOG_FILE_LOC = log_file\n PRODUCTION_BOOL = production_mode\n\n # Create the log file and set the permissions\n logging.basicConfig(filename=log_file,\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S %p',\n level=logging.INFO)\n set_permissions(log_file)\n\n\ndef make_log_file(module, production_mode=True, path='./'):\n 
\"\"\"Create the log file name based on the module name.\n\n The name of the ``log_file`` is a combination of the name of the\n module being logged and the current datetime.\n\n Parameters\n ----------\n module : str\n The name of the module being logged.\n production_mode : bool\n Whether or not the output should be written to the production\n environment.\n path : str\n Where to write the log if user-supplied path; default to\n working dir.\n\n Returns\n -------\n log_file : str\n The full path to where the log file will be written to.\n \"\"\"\n\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n filename = '{0}_{1}.log'.format(module, timestamp)\n user = pwd.getpwuid(os.getuid()).pw_name\n\n settings = get_config()\n admin_account = settings['admin_account']\n log_path = settings['log_dir']\n\n exempt_modules = []\n if user != admin_account and module not in exempt_modules and production_mode:\n module = os.path.join('dev', module)\n\n if production_mode:\n log_file = os.path.join(log_path, module, filename)\n else:\n log_file = os.path.join(path, filename)\n\n ensure_dir_exists(os.path.dirname(log_file))\n\n return log_file\n\n\ndef log_info(func):\n \"\"\"Decorator to log useful system information.\n\n This function can be used as a decorator to log user environment\n and system information. Future packages we want to track can be\n added or removed as necessary.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n # Log environment information\n logging.info('User: ' + getpass.getuser())\n logging.info('System: ' + socket.gethostname())\n logging.info('Python Version: ' + sys.version.replace('\\n', ''))\n logging.info('Python Executable Path: ' + sys.executable)\n\n # Read in setup.py file to build list of required modules\n settings = get_config()\n setup_file_name = settings['setup_file']\n with open(setup_file_name) as setup:\n for line in setup:\n if line[0:8] == \"REQUIRES\":\n module_required = line[12:-2]\n module_list = module_required.split(',')\n\n # Clean up the module list\n module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in module_list]\n module_list = [module.split('=')[0] for module in module_list]\n\n # Log common module version information\n for module in module_list:\n try:\n mod = importlib.import_module(module)\n logging.info(module + ' Version: ' + mod.__version__)\n logging.info(module + ' Path: ' + mod.__path__[0])\n except ImportError as err:\n logging.warning(err)\n\n # Call the function and time it\n t1_cpu = time.clock()\n t1_time = time.time()\n func(*a, **kw)\n t2_cpu = time.clock()\n t2_time = time.time()\n\n # Log execution time\n hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)\n minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)\n hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)\n minutes_time, seconds_time = divmod(remainder_time, 60)\n logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))\n logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))\n\n return wrapped\n\n\ndef log_fail(func):\n \"\"\"Decorator to log crashes in the decorated code.\n\n Parameters\n ----------\n func : func\n The function to decorate.\n\n Returns\n -------\n wrapped : func\n The wrapped function.\n \"\"\"\n\n @wraps(func)\n def wrapped(*a, **kw):\n\n try:\n\n # Run 
the function\n func(*a, **kw)\n logging.info('Completed Successfully')\n\n except Exception:\n logging.critical(traceback.format_exc())\n logging.critical('CRASHED')\n\n return wrapped\n", "path": "jwql/utils/logging_functions.py"}]} | 2,905 | 125 |
gh_patches_debug_36890 | rasdani/github-patches | git_diff | bokeh__bokeh-4021 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow for the injection of raw HTML code
Currently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple div named div tag.
</issue>
<code>
[start of bokeh/models/widgets/markups.py]
1 """ Various kinds of markup (static content) widgets.
2
3 """
4 from __future__ import absolute_import
5
6 from ...core.properties import abstract
7 from ...core.properties import Int, String
8 from .widget import Widget
9
10 @abstract
11 class Markup(Widget):
12 """ Base class for HTML markup widget models. """
13
14 class Paragraph(Markup):
15 """ A block (paragraph) of text.
16
17 """
18
19 text = String(default="", help="""
20 The contents of the widget.
21 """)
22
23 width = Int(500, help="""
24 The width of the block in pixels.
25 """)
26
27 height = Int(400, help="""
28 The height of the block in pixels.
29 """)
30
31
32 class PreText(Paragraph):
33 """ A block (paragraph) of pre-formatted text.
34
35 """
36
[end of bokeh/models/widgets/markups.py]
[start of examples/plotting/file/slider_callback_policy.py]
1 from bokeh.io import vform, output_file, show
2 from bokeh.models import CustomJS, Slider, Paragraph, PreText
3
4 # NOTE: the JS functions to forvide the format code for strings is found the answer
5 # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format
6 callback = CustomJS(code="""
7 var s1 = slider1.get('value')
8 var s2 = slider2.get('value')
9 var s3 = slider3.get('value')
10
11 if (!String.prototype.format) {
12 String.prototype.format = function() {
13 var args = arguments;
14 return this.replace(/{(\d+)}/g, function(match, number) {
15 return typeof args[number] != 'undefined'
16 ? args[number]
17 : match
18 ;
19 });
20 };
21 }
22
23 para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3))
24 """)
25
26 para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150)
27
28 s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous")
29 s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
30 s3 = Slider(title="Slider 3 (Mouse Up)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="mouseup")
31
32 callback.args['para'] = para
33 callback.args['slider1'] = s1
34 callback.args['slider2'] = s2
35 callback.args['slider3'] = s3
36
37 output_file('slider_callback_policy.html')
38
39 show(vform(s1, s2, s3, para))
40
[end of examples/plotting/file/slider_callback_policy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py
--- a/bokeh/models/widgets/markups.py
+++ b/bokeh/models/widgets/markups.py
@@ -4,18 +4,13 @@
from __future__ import absolute_import
from ...core.properties import abstract
-from ...core.properties import Int, String
+from ...core.properties import Int, String, Bool
from .widget import Widget
@abstract
class Markup(Widget):
""" Base class for HTML markup widget models. """
-class Paragraph(Markup):
- """ A block (paragraph) of text.
-
- """
-
text = String(default="", help="""
The contents of the widget.
""")
@@ -29,6 +24,20 @@
""")
+class Paragraph(Markup):
+ """ A block (paragraph) of text.
+
+ """
+
+class Div(Markup):
+ """ A block (div) of text.
+
+ """
+
+ render_as_text = Bool(False, help="""
+ Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)
+ """)
+
class PreText(Paragraph):
""" A block (paragraph) of pre-formatted text.
diff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py
--- a/examples/plotting/file/slider_callback_policy.py
+++ b/examples/plotting/file/slider_callback_policy.py
@@ -1,5 +1,5 @@
from bokeh.io import vform, output_file, show
-from bokeh.models import CustomJS, Slider, Paragraph, PreText
+from bokeh.models import CustomJS, Slider, Div
# NOTE: the JS functions to forvide the format code for strings is found the answer
# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format
@@ -20,10 +20,10 @@
};
}
- para.set('text', "Slider Values\\n\\n Slider 1: {0}\\n Slider 2: {1}\\n Slider 3: {2}".format(s1, s2, s3))
+ para.set('text', "<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}".format(s1, s2, s3))
""")
-para = PreText(text = "Slider Values:\n\n Slider 1: 0\n Slider 2: 0\n Slider 3: 0", width = 200, height = 150)
+para = Div(text="<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0", width=200, height=150, render_as_text=False)
s1 = Slider(title="Slider 1 (Continuous)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="continuous")
s2 = Slider(title="Slider 2 (Throttle)", start=0, end=1000, value=0, step=1, callback=callback, callback_policy="throttle", callback_throttle=2000)
| {"golden_diff": "diff --git a/bokeh/models/widgets/markups.py b/bokeh/models/widgets/markups.py\n--- a/bokeh/models/widgets/markups.py\n+++ b/bokeh/models/widgets/markups.py\n@@ -4,18 +4,13 @@\n from __future__ import absolute_import\n \n from ...core.properties import abstract\n-from ...core.properties import Int, String\n+from ...core.properties import Int, String, Bool\n from .widget import Widget\n \n @abstract\n class Markup(Widget):\n \"\"\" Base class for HTML markup widget models. \"\"\"\n \n-class Paragraph(Markup):\n- \"\"\" A block (paragraph) of text.\n-\n- \"\"\"\n-\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n@@ -29,6 +24,20 @@\n \"\"\")\n \n \n+class Paragraph(Markup):\n+ \"\"\" A block (paragraph) of text.\n+\n+ \"\"\"\n+\n+class Div(Markup):\n+ \"\"\" A block (div) of text.\n+\n+ \"\"\"\n+\n+ render_as_text = Bool(False, help=\"\"\"\n+ Should the text be rendered as raw text (False, default), or should the text be interprited as an HTML string (True)\n+ \"\"\")\n+\n class PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n \ndiff --git a/examples/plotting/file/slider_callback_policy.py b/examples/plotting/file/slider_callback_policy.py\n--- a/examples/plotting/file/slider_callback_policy.py\n+++ b/examples/plotting/file/slider_callback_policy.py\n@@ -1,5 +1,5 @@\n from bokeh.io import vform, output_file, show\n-from bokeh.models import CustomJS, Slider, Paragraph, PreText\n+from bokeh.models import CustomJS, Slider, Div\n \n # NOTE: the JS functions to forvide the format code for strings is found the answer\n # from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\n@@ -20,10 +20,10 @@\n };\n }\n \n- para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n+ para.set('text', \"<h1>Slider Values</h1><p>Slider 1: {0}<p>Slider 2: {1}<p>Slider 3: {2}\".format(s1, s2, s3))\n \"\"\")\n \n-para = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n+para = Div(text=\"<h1>Slider Values:</h1><p>Slider 1: 0<p>Slider 2: 0<p>Slider 3: 0\", width=200, height=150, render_as_text=False)\n \n s1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\n s2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\n", "issue": "Allow for the injection of raw HTML code\nCurrently, the Widget library contains a `Paragraph` and a `PreText` widget allowing the user to put basic text on the rendered page. Neither of these widgets allows for the inclusion of formatted text using HTML markup. A widget should be added to support the inclusion of raw HTML. The widget can be a simple div named div tag.\n\n", "before_files": [{"content": "\"\"\" Various kinds of markup (static content) widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Int, String\nfrom .widget import Widget\n\n@abstract\nclass Markup(Widget):\n \"\"\" Base class for HTML markup widget models. 
\"\"\"\n\nclass Paragraph(Markup):\n \"\"\" A block (paragraph) of text.\n\n \"\"\"\n\n text = String(default=\"\", help=\"\"\"\n The contents of the widget.\n \"\"\")\n\n width = Int(500, help=\"\"\"\n The width of the block in pixels.\n \"\"\")\n\n height = Int(400, help=\"\"\"\n The height of the block in pixels.\n \"\"\")\n\n\nclass PreText(Paragraph):\n \"\"\" A block (paragraph) of pre-formatted text.\n\n \"\"\"\n", "path": "bokeh/models/widgets/markups.py"}, {"content": "from bokeh.io import vform, output_file, show\nfrom bokeh.models import CustomJS, Slider, Paragraph, PreText\n\n# NOTE: the JS functions to forvide the format code for strings is found the answer\n# from the user fearphage at http://stackoverflow.com/questions/610406/javascript-equivalent-to-printf-string-format\ncallback = CustomJS(code=\"\"\"\n var s1 = slider1.get('value')\n var s2 = slider2.get('value')\n var s3 = slider3.get('value')\n\n if (!String.prototype.format) {\n String.prototype.format = function() {\n var args = arguments;\n return this.replace(/{(\\d+)}/g, function(match, number) {\n return typeof args[number] != 'undefined'\n ? args[number]\n : match\n ;\n });\n };\n }\n\n para.set('text', \"Slider Values\\\\n\\\\n Slider 1: {0}\\\\n Slider 2: {1}\\\\n Slider 3: {2}\".format(s1, s2, s3))\n\"\"\")\n\npara = PreText(text = \"Slider Values:\\n\\n Slider 1: 0\\n Slider 2: 0\\n Slider 3: 0\", width = 200, height = 150)\n\ns1 = Slider(title=\"Slider 1 (Continuous)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"continuous\")\ns2 = Slider(title=\"Slider 2 (Throttle)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"throttle\", callback_throttle=2000)\ns3 = Slider(title=\"Slider 3 (Mouse Up)\", start=0, end=1000, value=0, step=1, callback=callback, callback_policy=\"mouseup\")\n\ncallback.args['para'] = para\ncallback.args['slider1'] = s1\ncallback.args['slider2'] = s2\ncallback.args['slider3'] = s3\n\noutput_file('slider_callback_policy.html')\n\nshow(vform(s1, s2, s3, para))\n", "path": "examples/plotting/file/slider_callback_policy.py"}]} | 1,440 | 762 |
gh_patches_debug_14897 | rasdani/github-patches | git_diff | qtile__qtile-3099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Widget for updatable image
I don't want to reinvent the wheel, so will check before.
I use GenPollText for my keyboard layout indicator but instead I want to see the flag (image). As I change layout the image should be changed. Do Qtile has such widget or proper way to do that?
</issue>
<code>
[start of libqtile/widget/image.py]
1 # Copyright (c) 2013 dequis
2 # Copyright (c) 2014 Sean Vig
3 # Copyright (c) 2014 Adi Sieker
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a copy
6 # of this software and associated documentation files (the "Software"), to deal
7 # in the Software without restriction, including without limitation the rights
8 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 # copies of the Software, and to permit persons to whom the Software is
10 # furnished to do so, subject to the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be included in
13 # all copies or substantial portions of the Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 # SOFTWARE.
22 import os
23
24 from libqtile import bar
25 from libqtile.images import Img
26 from libqtile.log_utils import logger
27 from libqtile.widget import base
28
29
30 class Image(base._Widget, base.MarginMixin):
31 """Display a PNG image on the bar"""
32 orientations = base.ORIENTATION_BOTH
33 defaults = [
34 ("scale", True, "Enable/Disable image scaling"),
35 ("rotate", 0.0, "rotate the image in degrees counter-clockwise"),
36 ("filename", None, "Image filename. Can contain '~'"),
37 ]
38
39 def __init__(self, length=bar.CALCULATED, **config):
40 base._Widget.__init__(self, length, **config)
41 self.add_defaults(Image.defaults)
42 self.add_defaults(base.MarginMixin.defaults)
43
44 # make the default 0 instead
45 self._variable_defaults["margin"] = 0
46
47 def _configure(self, qtile, bar):
48 base._Widget._configure(self, qtile, bar)
49 self.img = None
50
51 if not self.filename:
52 logger.warning("Image filename not set!")
53 return
54
55 self.filename = os.path.expanduser(self.filename)
56
57 if not os.path.exists(self.filename):
58 logger.warning("Image does not exist: {}".format(self.filename))
59 return
60
61 img = Img.from_path(self.filename)
62 self.img = img
63 img.theta = self.rotate
64 if not self.scale:
65 return
66 if self.bar.horizontal:
67 new_height = self.bar.height - (self.margin_y * 2)
68 img.resize(height=new_height)
69 else:
70 new_width = self.bar.width - (self.margin_x * 2)
71 img.resize(width=new_width)
72
73 def draw(self):
74 if self.img is None:
75 return
76
77 self.drawer.clear(self.background or self.bar.background)
78 self.drawer.ctx.save()
79 self.drawer.ctx.translate(self.margin_x, self.margin_y)
80 self.drawer.ctx.set_source(self.img.pattern)
81 self.drawer.ctx.paint()
82 self.drawer.ctx.restore()
83
84 if self.bar.horizontal:
85 self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)
86 else:
87 self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)
88
89 def calculate_length(self):
90 if self.img is None:
91 return 0
92
93 if self.bar.horizontal:
94 return self.img.width + (self.margin_x * 2)
95 else:
96 return self.img.height + (self.margin_y * 2)
97
[end of libqtile/widget/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py
--- a/libqtile/widget/image.py
+++ b/libqtile/widget/image.py
@@ -46,6 +46,9 @@
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
+ self._update_image()
+
+ def _update_image(self):
self.img = None
if not self.filename:
@@ -94,3 +97,13 @@
return self.img.width + (self.margin_x * 2)
else:
return self.img.height + (self.margin_y * 2)
+
+ def cmd_update(self, filename):
+ old_length = self.calculate_length()
+ self.filename = filename
+ self._update_image()
+
+ if self.calculate_length() == old_length:
+ self.draw()
+ else:
+ self.bar.draw()
| {"golden_diff": "diff --git a/libqtile/widget/image.py b/libqtile/widget/image.py\n--- a/libqtile/widget/image.py\n+++ b/libqtile/widget/image.py\n@@ -46,6 +46,9 @@\n \n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n+ self._update_image()\n+\n+ def _update_image(self):\n self.img = None\n \n if not self.filename:\n@@ -94,3 +97,13 @@\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n+\n+ def cmd_update(self, filename):\n+ old_length = self.calculate_length()\n+ self.filename = filename\n+ self._update_image()\n+\n+ if self.calculate_length() == old_length:\n+ self.draw()\n+ else:\n+ self.bar.draw()\n", "issue": "Widget for updatable image\nI don't want to reinvent the wheel, so will check before.\r\nI use GenPollText for my keyboard layout indicator but instead I want to see the flag (image). As I change layout the image should be changed. Do Qtile has such widget or proper way to do that?\n", "before_files": [{"content": "# Copyright (c) 2013 dequis\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nfrom libqtile import bar\nfrom libqtile.images import Img\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass Image(base._Widget, base.MarginMixin):\n \"\"\"Display a PNG image on the bar\"\"\"\n orientations = base.ORIENTATION_BOTH\n defaults = [\n (\"scale\", True, \"Enable/Disable image scaling\"),\n (\"rotate\", 0.0, \"rotate the image in degrees counter-clockwise\"),\n (\"filename\", None, \"Image filename. 
Can contain '~'\"),\n ]\n\n def __init__(self, length=bar.CALCULATED, **config):\n base._Widget.__init__(self, length, **config)\n self.add_defaults(Image.defaults)\n self.add_defaults(base.MarginMixin.defaults)\n\n # make the default 0 instead\n self._variable_defaults[\"margin\"] = 0\n\n def _configure(self, qtile, bar):\n base._Widget._configure(self, qtile, bar)\n self.img = None\n\n if not self.filename:\n logger.warning(\"Image filename not set!\")\n return\n\n self.filename = os.path.expanduser(self.filename)\n\n if not os.path.exists(self.filename):\n logger.warning(\"Image does not exist: {}\".format(self.filename))\n return\n\n img = Img.from_path(self.filename)\n self.img = img\n img.theta = self.rotate\n if not self.scale:\n return\n if self.bar.horizontal:\n new_height = self.bar.height - (self.margin_y * 2)\n img.resize(height=new_height)\n else:\n new_width = self.bar.width - (self.margin_x * 2)\n img.resize(width=new_width)\n\n def draw(self):\n if self.img is None:\n return\n\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.save()\n self.drawer.ctx.translate(self.margin_x, self.margin_y)\n self.drawer.ctx.set_source(self.img.pattern)\n self.drawer.ctx.paint()\n self.drawer.ctx.restore()\n\n if self.bar.horizontal:\n self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)\n else:\n self.drawer.draw(offsety=self.offset, offsetx=self.offsetx, height=self.width)\n\n def calculate_length(self):\n if self.img is None:\n return 0\n\n if self.bar.horizontal:\n return self.img.width + (self.margin_x * 2)\n else:\n return self.img.height + (self.margin_y * 2)\n", "path": "libqtile/widget/image.py"}]} | 1,589 | 205 |
gh_patches_debug_4946 | rasdani/github-patches | git_diff | conan-io__conan-5387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conan updates all my software installed with yum
Hi,
my system:
CentOS 7, conan 1.16.0, package manager: yum
```python
installer = SystemPackageTool()
installer.update()
```
The above code will instruct conan to update ALL installed packages on the system. This behavior was introduced here: https://github.com/conan-io/conan/pull/3181
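For reference, the call ends up in `YumTool.update()` (see the listing below), which with sudo enabled effectively runs `sudo yum update -y`, i.e. a full system upgrade rather than a metadata refresh:
```python
# Current implementation, quoted from conans/client/tools/system_pm.py (YumTool.update):
def update(self):
    _run(self._runner, "%syum update -y" % self._sudo_str, accepted_returns=[0, 100],
         output=self._output)
```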
This behavior contradicts the documentation, which states:
> update(): Updates the system package manager database. It’s called automatically from the install() method by default.
This would be achieved with `yum check-update` instead of `yum update`.
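A minimal sketch of the change I have in mind (an illustration only, not a tested patch — it assumes `yum check-update`'s exit code 100, which just means "updates are available", keeps being accepted the way the current code already does):
```python
class YumTool(BaseTool):
    def update(self):
        # Refresh the package metadata only; do not upgrade installed packages.
        # yum check-update exits with 100 when updates are available, so that
        # return code is treated as success here, not as an error.
        _run(self._runner, "%syum check-update -y" % self._sudo_str,
             accepted_returns=[0, 100], output=self._output)
```
That would keep the `install()` flow unchanged while limiting `update()` to the metadata refresh the documentation describes.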
In my opinion conan should not touch any installed packages unless explicitly instructed to do so, so I would like to see the old behavior back, as described in the documentation.
Cheers
</issue>
<code>
[start of conans/client/tools/system_pm.py]
1 import os
2 import sys
3
4 from conans.client.runner import ConanRunner
5 from conans.client.tools.oss import OSInfo
6 from conans.client.tools.files import which
7 from conans.errors import ConanException
8 from conans.util.env_reader import get_env
9 from conans.util.fallbacks import default_output
10
11
12 class SystemPackageTool(object):
13
14 def __init__(self, runner=None, os_info=None, tool=None, recommends=False, output=None):
15
16 self._output = default_output(output, 'conans.client.tools.system_pm.SystemPackageTool')
17 os_info = os_info or OSInfo()
18 self._is_up_to_date = False
19 self._tool = tool or self._create_tool(os_info, output=self._output)
20 self._tool._sudo_str = self._get_sudo_str()
21 self._tool._runner = runner or ConanRunner(output=self._output)
22 self._tool._recommends = recommends
23
24 @staticmethod
25 def _get_sudo_str():
26 if not SystemPackageTool._is_sudo_enabled():
27 return ""
28
29 if hasattr(sys.stdout, "isatty") and not sys.stdout.isatty():
30 return "sudo -A "
31 else:
32 return "sudo "
33
34 @staticmethod
35 def _is_sudo_enabled():
36 if "CONAN_SYSREQUIRES_SUDO" not in os.environ:
37 if not which("sudo"):
38 return False
39 if os.name == 'posix' and os.geteuid() == 0:
40 return False
41 if os.name == 'nt':
42 return False
43 return get_env("CONAN_SYSREQUIRES_SUDO", True)
44
45 @staticmethod
46 def _get_sysrequire_mode():
47 allowed_modes = ("enabled", "verify", "disabled")
48 mode = get_env("CONAN_SYSREQUIRES_MODE", "enabled")
49 mode_lower = mode.lower()
50 if mode_lower not in allowed_modes:
51 raise ConanException("CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r"
52 % (mode, allowed_modes))
53 return mode_lower
54
55 @staticmethod
56 def _create_tool(os_info, output):
57 if os_info.with_apt:
58 return AptTool(output=output)
59 elif os_info.with_yum:
60 return YumTool(output=output)
61 elif os_info.with_pacman:
62 return PacManTool(output=output)
63 elif os_info.is_macos:
64 return BrewTool(output=output)
65 elif os_info.is_freebsd:
66 return PkgTool(output=output)
67 elif os_info.is_solaris:
68 return PkgUtilTool(output=output)
69 elif os_info.with_zypper:
70 return ZypperTool(output=output)
71 else:
72 return NullTool(output=output)
73
74 def add_repository(self, repository, repo_key=None, update=True):
75 self._tool.add_repository(repository, repo_key=repo_key)
76 if update:
77 self.update()
78
79 def update(self):
80 """
81 Get the system package tool update command
82 """
83 mode = self._get_sysrequire_mode()
84 if mode in ("disabled", "verify"):
85 self._output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s" % mode)
86 return
87 self._is_up_to_date = True
88 self._tool.update()
89
90 def install(self, packages, update=True, force=False):
91 """
92 Get the system package tool install command.
93 '"""
94 packages = [packages] if isinstance(packages, str) else list(packages)
95
96 mode = self._get_sysrequire_mode()
97
98 if mode in ("verify", "disabled"):
99 # Report to output packages need to be installed
100 if mode == "disabled":
101 self._output.info("The following packages need to be installed:\n %s"
102 % "\n".join(packages))
103 return
104
105 if mode == "verify" and not self._installed(packages):
106 self._output.error("The following packages need to be installed:\n %s"
107 % "\n".join(packages))
108 raise ConanException("Aborted due to CONAN_SYSREQUIRES_MODE=%s. "
109 "Some system packages need to be installed" % mode)
110
111 if not force and self._installed(packages):
112 return
113
114 # From here system packages can be updated/modified
115 if update and not self._is_up_to_date:
116 self.update()
117 self._install_any(packages)
118
119 def _installed(self, packages):
120 if not packages:
121 return True
122
123 for pkg in packages:
124 if self._tool.installed(pkg):
125 self._output.info("Package already installed: %s" % pkg)
126 return True
127 return False
128
129 def _install_any(self, packages):
130 if len(packages) == 1:
131 return self._tool.install(packages[0])
132 for pkg in packages:
133 try:
134 return self._tool.install(pkg)
135 except ConanException:
136 pass
137 raise ConanException("Could not install any of %s" % packages)
138
139
140 class BaseTool(object):
141 def __init__(self, output=None):
142 self._output = default_output(output, 'conans.client.tools.system_pm.BaseTool')
143
144
145 class NullTool(BaseTool):
146 def add_repository(self, repository, repo_key=None):
147 pass
148
149 def update(self):
150 pass
151
152 def install(self, package_name):
153 self._output.warn("Only available for linux with apt-get, yum, or pacman or OSX with brew or"
154 " FreeBSD with pkg or Solaris with pkgutil")
155
156 def installed(self, package_name):
157 return False
158
159
160 class AptTool(BaseTool):
161 def add_repository(self, repository, repo_key=None):
162 _run(self._runner, "%sapt-add-repository %s" % (self._sudo_str, repository),
163 output=self._output)
164 if repo_key:
165 _run(self._runner, "wget -qO - %s | %sapt-key add -" % (repo_key, self._sudo_str),
166 output=self._output)
167
168 def update(self):
169 _run(self._runner, "%sapt-get update" % self._sudo_str, output=self._output)
170
171 def install(self, package_name):
172 recommends_str = '' if self._recommends else '--no-install-recommends '
173 _run(self._runner,
174 "%sapt-get install -y %s%s" % (self._sudo_str, recommends_str, package_name),
175 output=self._output)
176
177 def installed(self, package_name):
178 exit_code = self._runner("dpkg-query -W -f='${Status}' %s | grep -q \"ok installed\""
179 % package_name, None)
180 return exit_code == 0
181
182
183 class YumTool(BaseTool):
184 def add_repository(self, repository, repo_key=None):
185 raise ConanException("YumTool::add_repository not implemented")
186
187 def update(self):
188 _run(self._runner, "%syum update -y" % self._sudo_str, accepted_returns=[0, 100],
189 output=self._output)
190
191 def install(self, package_name):
192 _run(self._runner, "%syum install -y %s" % (self._sudo_str, package_name),
193 output=self._output)
194
195 def installed(self, package_name):
196 exit_code = self._runner("rpm -q %s" % package_name, None)
197 return exit_code == 0
198
199
200 class BrewTool(BaseTool):
201 def add_repository(self, repository, repo_key=None):
202 raise ConanException("BrewTool::add_repository not implemented")
203
204 def update(self):
205 _run(self._runner, "brew update", output=self._output)
206
207 def install(self, package_name):
208 _run(self._runner, "brew install %s" % package_name, output=self._output)
209
210 def installed(self, package_name):
211 exit_code = self._runner('test -n "$(brew ls --versions %s)"' % package_name, None)
212 return exit_code == 0
213
214
215 class PkgTool(BaseTool):
216 def add_repository(self, repository, repo_key=None):
217 raise ConanException("PkgTool::add_repository not implemented")
218
219 def update(self):
220 _run(self._runner, "%spkg update" % self._sudo_str, output=self._output)
221
222 def install(self, package_name):
223 _run(self._runner, "%spkg install -y %s" % (self._sudo_str, package_name),
224 output=self._output)
225
226 def installed(self, package_name):
227 exit_code = self._runner("pkg info %s" % package_name, None)
228 return exit_code == 0
229
230
231 class PkgUtilTool(BaseTool):
232 def add_repository(self, repository, repo_key=None):
233 raise ConanException("PkgUtilTool::add_repository not implemented")
234
235 def update(self):
236 _run(self._runner, "%spkgutil --catalog" % self._sudo_str, output=self._output)
237
238 def install(self, package_name):
239 _run(self._runner, "%spkgutil --install --yes %s" % (self._sudo_str, package_name),
240 output=self._output)
241
242 def installed(self, package_name):
243 exit_code = self._runner('test -n "`pkgutil --list %s`"' % package_name, None)
244 return exit_code == 0
245
246
247 class ChocolateyTool(BaseTool):
248 def add_repository(self, repository, repo_key=None):
249 raise ConanException("ChocolateyTool::add_repository not implemented")
250
251 def update(self):
252 _run(self._runner, "choco outdated", output=self._output)
253
254 def install(self, package_name):
255 _run(self._runner, "choco install --yes %s" % package_name, output=self._output)
256
257 def installed(self, package_name):
258 exit_code = self._runner('choco search --local-only --exact %s | '
259 'findstr /c:"1 packages installed."' % package_name, None)
260 return exit_code == 0
261
262
263 class PacManTool(BaseTool):
264 def add_repository(self, repository, repo_key=None):
265 raise ConanException("PacManTool::add_repository not implemented")
266
267 def update(self):
268 _run(self._runner, "%spacman -Syyu --noconfirm" % self._sudo_str, output=self._output)
269
270 def install(self, package_name):
271 _run(self._runner, "%spacman -S --noconfirm %s" % (self._sudo_str, package_name),
272 output=self._output)
273
274 def installed(self, package_name):
275 exit_code = self._runner("pacman -Qi %s" % package_name, None)
276 return exit_code == 0
277
278
279 class ZypperTool(BaseTool):
280 def add_repository(self, repository, repo_key=None):
281 raise ConanException("ZypperTool::add_repository not implemented")
282
283 def update(self):
284 _run(self._runner, "%szypper --non-interactive ref" % self._sudo_str, output=self._output)
285
286 def install(self, package_name):
287 _run(self._runner, "%szypper --non-interactive in %s" % (self._sudo_str, package_name),
288 output=self._output)
289
290 def installed(self, package_name):
291 exit_code = self._runner("rpm -q %s" % package_name, None)
292 return exit_code == 0
293
294
295 def _run(runner, command, output, accepted_returns=None):
296 accepted_returns = accepted_returns or [0, ]
297 output.info("Running: %s" % command)
298 if runner(command, True) not in accepted_returns:
299 raise ConanException("Command '%s' failed" % command)
300
[end of conans/client/tools/system_pm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py
--- a/conans/client/tools/system_pm.py
+++ b/conans/client/tools/system_pm.py
@@ -185,7 +185,7 @@
raise ConanException("YumTool::add_repository not implemented")
def update(self):
- _run(self._runner, "%syum update -y" % self._sudo_str, accepted_returns=[0, 100],
+ _run(self._runner, "%syum check-update -y" % self._sudo_str, accepted_returns=[0, 100],
output=self._output)
def install(self, package_name):
| {"golden_diff": "diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py\n--- a/conans/client/tools/system_pm.py\n+++ b/conans/client/tools/system_pm.py\n@@ -185,7 +185,7 @@\n raise ConanException(\"YumTool::add_repository not implemented\")\n \n def update(self):\n- _run(self._runner, \"%syum update -y\" % self._sudo_str, accepted_returns=[0, 100],\n+ _run(self._runner, \"%syum check-update -y\" % self._sudo_str, accepted_returns=[0, 100],\n output=self._output)\n \n def install(self, package_name):\n", "issue": "conan updates all my software installed with yum\nHi,\r\n\r\nmy system:\r\nCentOs7, conan 1.16.0, packet manager yum\r\n\r\n```python\r\ninstaller = SystemPackageTool()\r\ninstaller.update()\r\n```\r\nThe above code will instuct conan to update ALL installed packets in the system. It was introduced here: https://github.com/conan-io/conan/pull/3181\r\n\r\nThis contradicts with the docu that states:\r\n\r\n> update(): Updates the system package manager database. It\u2019s called automatically from the install() method by default.\r\n\r\nThis would be achieved with `yum check-update` instead of `yum update`.\r\nIn my opinion conan should not touch any installed packets, unless expicitly instructed to do so. So I would like to see the old behavior back, like it is discribed in the docu.\r\n\r\nCheers\n", "before_files": [{"content": "import os\nimport sys\n\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.client.tools.files import which\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\nfrom conans.util.fallbacks import default_output\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False, output=None):\n\n self._output = default_output(output, 'conans.client.tools.system_pm.SystemPackageTool')\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info, output=self._output)\n self._tool._sudo_str = self._get_sudo_str()\n self._tool._runner = runner or ConanRunner(output=self._output)\n self._tool._recommends = recommends\n\n @staticmethod\n def _get_sudo_str():\n if not SystemPackageTool._is_sudo_enabled():\n return \"\"\n\n if hasattr(sys.stdout, \"isatty\") and not sys.stdout.isatty():\n return \"sudo -A \"\n else:\n return \"sudo \"\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if not which(\"sudo\"):\n return False\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\"\n % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info, output):\n if os_info.with_apt:\n return AptTool(output=output)\n elif os_info.with_yum:\n return YumTool(output=output)\n elif os_info.with_pacman:\n return PacManTool(output=output)\n elif os_info.is_macos:\n return BrewTool(output=output)\n elif os_info.is_freebsd:\n return PkgTool(output=output)\n elif os_info.is_solaris:\n return PkgUtilTool(output=output)\n elif os_info.with_zypper:\n return ZypperTool(output=output)\n else:\n return 
NullTool(output=output)\n\n def add_repository(self, repository, repo_key=None, update=True):\n self._tool.add_repository(repository, repo_key=repo_key)\n if update:\n self.update()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n self._output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n self._output.info(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n self._output.error(\"The following packages need to be installed:\\n %s\"\n % \"\\n\".join(packages))\n raise ConanException(\"Aborted due to CONAN_SYSREQUIRES_MODE=%s. \"\n \"Some system packages need to be installed\" % mode)\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n self._output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass BaseTool(object):\n def __init__(self, output=None):\n self._output = default_output(output, 'conans.client.tools.system_pm.BaseTool')\n\n\nclass NullTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n pass\n\n def update(self):\n pass\n\n def install(self, package_name):\n self._output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or\"\n \" FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n _run(self._runner, \"%sapt-add-repository %s\" % (self._sudo_str, repository),\n output=self._output)\n if repo_key:\n _run(self._runner, \"wget -qO - %s | %sapt-key add -\" % (repo_key, self._sudo_str),\n output=self._output)\n\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner,\n \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg-query -W -f='${Status}' %s | grep -q \\\"ok installed\\\"\"\n % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"YumTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%syum update -y\" % self._sudo_str, accepted_returns=[0, 100],\n 
output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"BrewTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"brew update\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PkgUtilTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ChocolateyTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"choco outdated\", output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name, output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"PacManTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(BaseTool):\n def add_repository(self, repository, repo_key=None):\n raise ConanException(\"ZypperTool::add_repository not implemented\")\n\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str, output=self._output)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name),\n output=self._output)\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef 
_run(runner, command, output, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}]} | 4,046 | 152 |
gh_patches_debug_11910 | rasdani/github-patches | git_diff | web2py__web2py-1682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
heroku ADAPTERS error
I'm looking to use Heroku for deployment of my web2py project. However, when I add
`from gluon.contrib.heroku import get_db`
`db = get_db(name=None, pool_size=myconf.get('db.pool_size'))`
I get a ticket with the error:
> File "/Users/huangyu/dev/web2py/gluon/contrib/heroku.py", line 10, in <module>
from pydal.adapters import ADAPTERS, PostgreSQLAdapter
ImportError: cannot import name ADAPTERS
It looks like web2py has moved on from using `ADAPTERS`. Has that been replaced by
`@adapters.register_for('postgres')`?
But the heroku file has not been updated.
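For illustration, a minimal sketch of what the decorator-based registration could look like (the base classes shown are an assumption and may differ between pydal versions):

```python
from pydal.adapters import adapters, PostgrePsyco
from pydal.helpers.classes import DatabaseStoredFile

# Register the adapter for the "postgres" URI scheme instead of mutating ADAPTERS.
@adapters.register_for('postgres')
class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):
    uploads_in_blob = True
```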
</issue>
<code>
[start of gluon/contrib/heroku.py]
1 """
2 Usage: in web2py models/db.py
3
4 from gluon.contrib.heroku import get_db
5 db = get_db()
6
7 """
8 import os
9 from gluon import *
10 from pydal.adapters import ADAPTERS, PostgreSQLAdapter
11 from pydal.helpers.classes import UseDatabaseStoredFile
12
13 class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
14 drivers = ('psycopg2',)
15 uploads_in_blob = True
16
17 ADAPTERS['postgres'] = HerokuPostgresAdapter
18
19 def get_db(name = None, pool_size=10):
20 if not name:
21 names = [n for n in os.environ.keys()
22 if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']
23 if names:
24 name = names[0]
25 if name:
26 db = DAL(os.environ[name], pool_size=pool_size)
27 current.session.connect(current.request, current.response, db=db)
28 else:
29 db = DAL('sqlite://heroku.test.sqlite')
30 return db
31
[end of gluon/contrib/heroku.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py
--- a/gluon/contrib/heroku.py
+++ b/gluon/contrib/heroku.py
@@ -7,15 +7,13 @@
"""
import os
from gluon import *
-from pydal.adapters import ADAPTERS, PostgreSQLAdapter
-from pydal.helpers.classes import UseDatabaseStoredFile
+from pydal.adapters import adapters, PostgrePsyco
+from pydal.helpers.classes import DatabaseStoredFile
-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
- drivers = ('psycopg2',)
[email protected]_for('postgres')
+class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):
uploads_in_blob = True
-ADAPTERS['postgres'] = HerokuPostgresAdapter
-
def get_db(name = None, pool_size=10):
if not name:
names = [n for n in os.environ.keys()
| {"golden_diff": "diff --git a/gluon/contrib/heroku.py b/gluon/contrib/heroku.py\n--- a/gluon/contrib/heroku.py\n+++ b/gluon/contrib/heroku.py\n@@ -7,15 +7,13 @@\n \"\"\"\n import os\n from gluon import *\n-from pydal.adapters import ADAPTERS, PostgreSQLAdapter\n-from pydal.helpers.classes import UseDatabaseStoredFile\n+from pydal.adapters import adapters, PostgrePsyco\n+from pydal.helpers.classes import DatabaseStoredFile\n \n-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n- drivers = ('psycopg2',)\[email protected]_for('postgres')\n+class HerokuPostgresAdapter(DatabaseStoredFile, PostgrePsyco):\n uploads_in_blob = True\n \n-ADAPTERS['postgres'] = HerokuPostgresAdapter\n-\n def get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n", "issue": "heroku ADAPTERS error\nI'm looking to use Heroku for deployment of my web2py project. However, when I add \r\n`from gluon.contrib.heroku import get_db`\r\n`db = get_db(name=None, pool_size=myconf.get('db.pool_size'))`\r\n\r\nI get a ticket with the error:\r\n\r\n> File \"/Users/huangyu/dev/web2py/gluon/contrib/heroku.py\", line 10, in <module>\r\n from pydal.adapters import ADAPTERS, PostgreSQLAdapter\r\nImportError: cannot import name ADAPTERS\r\n\r\nIt looks like web2py has moved on from using ADAPTERS? Has that been replaced by \r\n`@adapters.register_for('postgres')`\r\n\r\nBut the heroku file has not been updated. \n", "before_files": [{"content": "\"\"\"\nUsage: in web2py models/db.py\n\nfrom gluon.contrib.heroku import get_db\ndb = get_db()\n\n\"\"\"\nimport os\nfrom gluon import *\nfrom pydal.adapters import ADAPTERS, PostgreSQLAdapter\nfrom pydal.helpers.classes import UseDatabaseStoredFile\n\nclass HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):\n drivers = ('psycopg2',)\n uploads_in_blob = True\n\nADAPTERS['postgres'] = HerokuPostgresAdapter\n\ndef get_db(name = None, pool_size=10):\n if not name:\n names = [n for n in os.environ.keys()\n if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']\n if names:\n name = names[0]\n if name:\n db = DAL(os.environ[name], pool_size=pool_size)\n current.session.connect(current.request, current.response, db=db)\n else:\n db = DAL('sqlite://heroku.test.sqlite')\n return db\n", "path": "gluon/contrib/heroku.py"}]} | 972 | 214 |
gh_patches_debug_34290 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch unit tests from `unittest.mock` to SDK & in-memory exporter
See https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283.
Currently tests are cumbersome to write, and in most cases we probably don't want to test which API calls are made but rather what spans would result. For this, an SDK with an in-memory exporter would be better than using `unittest.mock`.
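For illustration, a rough sketch of the intended test setup (the entry points below follow the SDK packages in this repository at the time; treat the exact names as assumptions):

```python
from opentelemetry import trace as trace_api
from opentelemetry.sdk.trace import TracerSource, export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Install an SDK tracer source and attach an in-memory exporter once per test class.
trace_api.set_preferred_tracer_source_implementation(lambda _: TracerSource())
memory_exporter = InMemorySpanExporter()
trace_api.tracer_source().add_span_processor(
    export.SimpleExportSpanProcessor(memory_exporter)
)

# In a test: exercise the instrumented code, then assert on the finished spans
# instead of on mocked API calls.
spans = memory_exporter.get_finished_spans()
```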
</issue>
<code>
[start of ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py]
1 import io
2 import unittest
3 import unittest.mock as mock
4 import wsgiref.util as wsgiref_util
5
6 from opentelemetry import trace as trace_api
7
8
9 class WsgiTestBase(unittest.TestCase):
10 def setUp(self):
11 self.span = mock.create_autospec(trace_api.Span, spec_set=True)
12 tracer = trace_api.Tracer()
13 self.get_tracer_patcher = mock.patch.object(
14 trace_api.TracerSource,
15 "get_tracer",
16 autospec=True,
17 spec_set=True,
18 return_value=tracer,
19 )
20 self.get_tracer_patcher.start()
21
22 self.start_span_patcher = mock.patch.object(
23 tracer,
24 "start_span",
25 autospec=True,
26 spec_set=True,
27 return_value=self.span,
28 )
29 self.start_span = self.start_span_patcher.start()
30 self.write_buffer = io.BytesIO()
31 self.write = self.write_buffer.write
32
33 self.environ = {}
34 wsgiref_util.setup_testing_defaults(self.environ)
35
36 self.status = None
37 self.response_headers = None
38 self.exc_info = None
39
40 def tearDown(self):
41 self.get_tracer_patcher.stop()
42 self.start_span_patcher.stop()
43
44 def start_response(self, status, response_headers, exc_info=None):
45 self.status = status
46 self.response_headers = response_headers
47 self.exc_info = exc_info
48 return self.write
49
[end of ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
--- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
+++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py
@@ -1,32 +1,38 @@
import io
import unittest
-import unittest.mock as mock
import wsgiref.util as wsgiref_util
+from importlib import reload
from opentelemetry import trace as trace_api
+from opentelemetry.sdk.trace import TracerSource, export
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter,
+)
+
+_MEMORY_EXPORTER = None
class WsgiTestBase(unittest.TestCase):
- def setUp(self):
- self.span = mock.create_autospec(trace_api.Span, spec_set=True)
- tracer = trace_api.Tracer()
- self.get_tracer_patcher = mock.patch.object(
- trace_api.TracerSource,
- "get_tracer",
- autospec=True,
- spec_set=True,
- return_value=tracer,
- )
- self.get_tracer_patcher.start()
-
- self.start_span_patcher = mock.patch.object(
- tracer,
- "start_span",
- autospec=True,
- spec_set=True,
- return_value=self.span,
+ @classmethod
+ def setUpClass(cls):
+ global _MEMORY_EXPORTER # pylint:disable=global-statement
+ trace_api.set_preferred_tracer_source_implementation(
+ lambda T: TracerSource()
)
- self.start_span = self.start_span_patcher.start()
+ tracer_source = trace_api.tracer_source()
+ _MEMORY_EXPORTER = InMemorySpanExporter()
+ span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)
+ tracer_source.add_span_processor(span_processor)
+
+ @classmethod
+ def tearDownClass(cls):
+ reload(trace_api)
+
+ def setUp(self):
+
+ self.memory_exporter = _MEMORY_EXPORTER
+ self.memory_exporter.clear()
+
self.write_buffer = io.BytesIO()
self.write = self.write_buffer.write
@@ -37,10 +43,6 @@
self.response_headers = None
self.exc_info = None
- def tearDown(self):
- self.get_tracer_patcher.stop()
- self.start_span_patcher.stop()
-
def start_response(self, status, response_headers, exc_info=None):
self.status = status
self.response_headers = response_headers
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n--- a/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n+++ b/ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py\n@@ -1,32 +1,38 @@\n import io\n import unittest\n-import unittest.mock as mock\n import wsgiref.util as wsgiref_util\n+from importlib import reload\n \n from opentelemetry import trace as trace_api\n+from opentelemetry.sdk.trace import TracerSource, export\n+from opentelemetry.sdk.trace.export.in_memory_span_exporter import (\n+ InMemorySpanExporter,\n+)\n+\n+_MEMORY_EXPORTER = None\n \n \n class WsgiTestBase(unittest.TestCase):\n- def setUp(self):\n- self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n- tracer = trace_api.Tracer()\n- self.get_tracer_patcher = mock.patch.object(\n- trace_api.TracerSource,\n- \"get_tracer\",\n- autospec=True,\n- spec_set=True,\n- return_value=tracer,\n- )\n- self.get_tracer_patcher.start()\n-\n- self.start_span_patcher = mock.patch.object(\n- tracer,\n- \"start_span\",\n- autospec=True,\n- spec_set=True,\n- return_value=self.span,\n+ @classmethod\n+ def setUpClass(cls):\n+ global _MEMORY_EXPORTER # pylint:disable=global-statement\n+ trace_api.set_preferred_tracer_source_implementation(\n+ lambda T: TracerSource()\n )\n- self.start_span = self.start_span_patcher.start()\n+ tracer_source = trace_api.tracer_source()\n+ _MEMORY_EXPORTER = InMemorySpanExporter()\n+ span_processor = export.SimpleExportSpanProcessor(_MEMORY_EXPORTER)\n+ tracer_source.add_span_processor(span_processor)\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ reload(trace_api)\n+\n+ def setUp(self):\n+\n+ self.memory_exporter = _MEMORY_EXPORTER\n+ self.memory_exporter.clear()\n+\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n \n@@ -37,10 +43,6 @@\n self.response_headers = None\n self.exc_info = None\n \n- def tearDown(self):\n- self.get_tracer_patcher.stop()\n- self.start_span_patcher.stop()\n-\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n", "issue": "Switch unit tests from `unittest.mock` to SDK & in-memory exporter\nSee https://github.com/open-telemetry/opentelemetry-python/pull/290#issuecomment-558091283.\r\nCurrently tests are cumbersome to write and actually we probably don't want to test which API calls are made but what Spans would result in most cases. 
For this a SDK with in-memory exporter would be better than using `unittest.mock`.\n", "before_files": [{"content": "import io\nimport unittest\nimport unittest.mock as mock\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import trace as trace_api\n\n\nclass WsgiTestBase(unittest.TestCase):\n def setUp(self):\n self.span = mock.create_autospec(trace_api.Span, spec_set=True)\n tracer = trace_api.Tracer()\n self.get_tracer_patcher = mock.patch.object(\n trace_api.TracerSource,\n \"get_tracer\",\n autospec=True,\n spec_set=True,\n return_value=tracer,\n )\n self.get_tracer_patcher.start()\n\n self.start_span_patcher = mock.patch.object(\n tracer,\n \"start_span\",\n autospec=True,\n spec_set=True,\n return_value=self.span,\n )\n self.start_span = self.start_span_patcher.start()\n self.write_buffer = io.BytesIO()\n self.write = self.write_buffer.write\n\n self.environ = {}\n wsgiref_util.setup_testing_defaults(self.environ)\n\n self.status = None\n self.response_headers = None\n self.exc_info = None\n\n def tearDown(self):\n self.get_tracer_patcher.stop()\n self.start_span_patcher.stop()\n\n def start_response(self, status, response_headers, exc_info=None):\n self.status = status\n self.response_headers = response_headers\n self.exc_info = exc_info\n return self.write\n", "path": "ext/opentelemetry-ext-testutil/src/opentelemetry/ext/testutil/wsgitestutil.py"}]} | 1,047 | 593 |
gh_patches_debug_7939 | rasdani/github-patches | git_diff | mozilla__bugbug-3401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor logging statements to use lazy % formatting
Example of logging statements that we want to refactor:
https://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61
https://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396
More details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html).
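Concretely, the change is from eager interpolation (f-strings or `%` applied at call time) to passing the arguments to the logger, so formatting only happens when the record is actually emitted. A small illustration:

```python
import logging

logger = logging.getLogger(__name__)
file_path = "/tmp/metric_train_defect.json"  # illustrative value

# Before: the string is formatted even when INFO records are filtered out.
logger.info(f"Metrics saved to {file_path!r}")

# After: lazy % formatting; the message is only rendered if the record is emitted.
logger.info("Metrics saved to %r", file_path)
```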
</issue>
<code>
[start of scripts/retrieve_training_metrics.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import logging
8 import os
9 import sys
10 from os.path import abspath, join
11
12 import requests
13 import taskcluster
14
15 from bugbug.utils import get_taskcluster_options
16
17 ROOT_URI = "train_{}.per_date"
18 DATE_URI = "train_{}.per_date.{}"
19 BASE_URL = "https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json"
20 NAMESPACE_URI = "project.bugbug.{}"
21
22 LOGGER = logging.getLogger(__name__)
23
24 logging.basicConfig(level=logging.INFO)
25
26
27 def get_task_metrics_from_uri(index_uri):
28 index_url = BASE_URL.format(index_uri)
29 LOGGER.info("Retrieving metrics from %s", index_url)
30 r = requests.get(index_url)
31
32 if r.status_code == 404:
33 LOGGER.error(f"File not found for URL {index_url}, check your arguments")
34 sys.exit(1)
35
36 r.raise_for_status()
37
38 return r
39
40
41 def get_namespaces(index, index_uri):
42 index_namespaces = index.listNamespaces(index_uri)
43
44 return index_namespaces["namespaces"]
45
46
47 def is_later_or_equal(partial_date, from_date):
48 for partial_date_part, from_date_part in zip(partial_date, from_date):
49 if int(partial_date_part) > int(from_date_part):
50 return True
51 elif int(partial_date_part) < int(from_date_part):
52 return False
53 else:
54 continue
55
56 return True
57
58
59 def get_task_metrics_from_date(model, date, output_directory):
60 options = get_taskcluster_options()
61
62 index = taskcluster.Index(options)
63
64 index.ping()
65
66 # Split the date
67 from_date = date.split(".")
68
69 namespaces = []
70
71 # Start at the root level
72 # We need an empty list in order to append namespaces part to it
73 namespaces.append([])
74
75 # Recursively list all namespaces greater or equals than the given date
76 while namespaces:
77 current_ns = namespaces.pop()
78
79 # Handle version level namespaces
80 if not current_ns:
81 ns_uri = ROOT_URI.format(model)
82 else:
83 current_ns_date = ".".join(current_ns)
84 ns_uri = DATE_URI.format(model, current_ns_date)
85
86 ns_full_uri = NAMESPACE_URI.format(ns_uri)
87
88 tasks = index.listTasks(ns_full_uri)
89 for task in tasks["tasks"]:
90 task_uri = task["namespace"]
91 r = get_task_metrics_from_uri(task_uri)
92
93 # Write the file on disk
94 file_name = f"metric_{'_'.join(task_uri.split('.'))}.json"
95 file_path = abspath(join(output_directory, file_name))
96 with open(file_path, "w") as metric_file:
97 metric_file.write(r.text)
98 LOGGER.info(f"Metrics saved to {file_path!r}")
99
100 for namespace in get_namespaces(index, ns_full_uri):
101 new_ns = current_ns.copy()
102 new_ns.append(namespace["name"])
103
104 if not is_later_or_equal(new_ns, from_date):
105 LOGGER.debug("NEW namespace %s is before %s", new_ns, from_date)
106 continue
107
108 # Might not be efficient but size of `namespaces` shouldn't be too
109 # big as we are doing a depth-first traversal
110 if new_ns not in namespaces:
111 namespaces.append(new_ns)
112
113
114 def main():
115 description = "Retrieve a model training metrics"
116 parser = argparse.ArgumentParser(description=description)
117
118 parser.add_argument(
119 "-d",
120 "--output-directory",
121 default=os.getcwd(),
122 help="In which directory the script should save the metrics file. The directory must exists",
123 )
124 parser.add_argument("model", help="Which model to retrieve training metrics from.")
125 parser.add_argument(
126 "date",
127 nargs="?",
128 help="Which date should we retrieve training metrics from. Default to latest",
129 )
130
131 args = parser.parse_args()
132
133 get_task_metrics_from_date(args.model, args.date, args.output_directory)
134
135
136 if __name__ == "__main__":
137 main()
138
[end of scripts/retrieve_training_metrics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py
--- a/scripts/retrieve_training_metrics.py
+++ b/scripts/retrieve_training_metrics.py
@@ -95,7 +95,7 @@
file_path = abspath(join(output_directory, file_name))
with open(file_path, "w") as metric_file:
metric_file.write(r.text)
- LOGGER.info(f"Metrics saved to {file_path!r}")
+ LOGGER.info("Metrics saved to %r", file_path)
for namespace in get_namespaces(index, ns_full_uri):
new_ns = current_ns.copy()
| {"golden_diff": "diff --git a/scripts/retrieve_training_metrics.py b/scripts/retrieve_training_metrics.py\n--- a/scripts/retrieve_training_metrics.py\n+++ b/scripts/retrieve_training_metrics.py\n@@ -95,7 +95,7 @@\n file_path = abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n- LOGGER.info(f\"Metrics saved to {file_path!r}\")\n+ LOGGER.info(\"Metrics saved to %r\", file_path)\n \n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n", "issue": "Refactor logging statements to use lazy % formatting\nExample of logging statements that we want to refactor:\r\n\r\nhttps://github.com/mozilla/bugbug/blob/d53595391dbd75379bb49bff12dee4821e4b956c/bugbug/github.py#L61\r\n\r\nhttps://github.com/mozilla/bugbug/blob/69972a1684f788319bf5c2944bbe8eeb79428c7d/scripts/regressor_finder.py#L396\r\n\r\nMore details can be found in the [pylint docs](https://pylint.readthedocs.io/en/latest/user_guide/messages/warning/logging-fstring-interpolation.html).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport logging\nimport os\nimport sys\nfrom os.path import abspath, join\n\nimport requests\nimport taskcluster\n\nfrom bugbug.utils import get_taskcluster_options\n\nROOT_URI = \"train_{}.per_date\"\nDATE_URI = \"train_{}.per_date.{}\"\nBASE_URL = \"https://community-tc.services.mozilla.com/api/index/v1/task/{}/artifacts/public/metrics.json\"\nNAMESPACE_URI = \"project.bugbug.{}\"\n\nLOGGER = logging.getLogger(__name__)\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef get_task_metrics_from_uri(index_uri):\n index_url = BASE_URL.format(index_uri)\n LOGGER.info(\"Retrieving metrics from %s\", index_url)\n r = requests.get(index_url)\n\n if r.status_code == 404:\n LOGGER.error(f\"File not found for URL {index_url}, check your arguments\")\n sys.exit(1)\n\n r.raise_for_status()\n\n return r\n\n\ndef get_namespaces(index, index_uri):\n index_namespaces = index.listNamespaces(index_uri)\n\n return index_namespaces[\"namespaces\"]\n\n\ndef is_later_or_equal(partial_date, from_date):\n for partial_date_part, from_date_part in zip(partial_date, from_date):\n if int(partial_date_part) > int(from_date_part):\n return True\n elif int(partial_date_part) < int(from_date_part):\n return False\n else:\n continue\n\n return True\n\n\ndef get_task_metrics_from_date(model, date, output_directory):\n options = get_taskcluster_options()\n\n index = taskcluster.Index(options)\n\n index.ping()\n\n # Split the date\n from_date = date.split(\".\")\n\n namespaces = []\n\n # Start at the root level\n # We need an empty list in order to append namespaces part to it\n namespaces.append([])\n\n # Recursively list all namespaces greater or equals than the given date\n while namespaces:\n current_ns = namespaces.pop()\n\n # Handle version level namespaces\n if not current_ns:\n ns_uri = ROOT_URI.format(model)\n else:\n current_ns_date = \".\".join(current_ns)\n ns_uri = DATE_URI.format(model, current_ns_date)\n\n ns_full_uri = NAMESPACE_URI.format(ns_uri)\n\n tasks = index.listTasks(ns_full_uri)\n for task in tasks[\"tasks\"]:\n task_uri = task[\"namespace\"]\n r = get_task_metrics_from_uri(task_uri)\n\n # Write the file on disk\n file_name = f\"metric_{'_'.join(task_uri.split('.'))}.json\"\n file_path = 
abspath(join(output_directory, file_name))\n with open(file_path, \"w\") as metric_file:\n metric_file.write(r.text)\n LOGGER.info(f\"Metrics saved to {file_path!r}\")\n\n for namespace in get_namespaces(index, ns_full_uri):\n new_ns = current_ns.copy()\n new_ns.append(namespace[\"name\"])\n\n if not is_later_or_equal(new_ns, from_date):\n LOGGER.debug(\"NEW namespace %s is before %s\", new_ns, from_date)\n continue\n\n # Might not be efficient but size of `namespaces` shouldn't be too\n # big as we are doing a depth-first traversal\n if new_ns not in namespaces:\n namespaces.append(new_ns)\n\n\ndef main():\n description = \"Retrieve a model training metrics\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-d\",\n \"--output-directory\",\n default=os.getcwd(),\n help=\"In which directory the script should save the metrics file. The directory must exists\",\n )\n parser.add_argument(\"model\", help=\"Which model to retrieve training metrics from.\")\n parser.add_argument(\n \"date\",\n nargs=\"?\",\n help=\"Which date should we retrieve training metrics from. Default to latest\",\n )\n\n args = parser.parse_args()\n\n get_task_metrics_from_date(args.model, args.date, args.output_directory)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/retrieve_training_metrics.py"}]} | 1,945 | 132 |
gh_patches_debug_29495 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive for CKV_AWS_143 ("Ensure S3 bucket has lock configuration enabled by default")
**Describe the bug**
Checkov expects argument `object_lock_configuration` to be an object, i.e.
```hcl
object_lock_configuration = {
object_lock_enabled = "Enabled"
}
```
Terraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g.
```hcl
object_lock_configuration {
object_lock_enabled = "Enabled"
rule {
default_retention {
mode = "GOVERNANCE"
days = 366
}
}
}
```
**Expected behavior**
Checkov should pass for an `object_lock_configuration` argument block.
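For illustration, a sketch of how the two HCL forms plausibly reach the check after parsing (the exact dict shapes are assumptions about checkov's HCL-to-dict conversion):

```python
# Argument form: the value arrives as a plain string.
conf_argument = {
    "object_lock_configuration": [{"object_lock_enabled": "Enabled"}]
}

# Block form: attribute values inside blocks typically come back wrapped in lists,
# so the comparison `lock == "Enabled"` no longer matches.
conf_block = {
    "object_lock_configuration": [{
        "object_lock_enabled": ["Enabled"],
        "rule": [{"default_retention": [{"mode": ["GOVERNANCE"], "days": [366]}]}],
    }]
}
```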
**Desktop (please complete the following information):**
- OS: macOS Big Sur 11.3.1
- Checkov Version: 2.0.135
- Terraform version: v0.14.8
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/S3BucketObjectLock.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
3
4
5 class S3BucketObjectLock(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure that S3 bucket has lock configuration enabled by default"
8 id = "CKV_AWS_143"
9 supported_resources = ['aws_s3_bucket']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'object_lock_configuration' in conf:
15 if 'object_lock_enabled' in conf['object_lock_configuration'][0]:
16 lock = conf['object_lock_configuration'][0]['object_lock_enabled']
17 if lock == "Enabled":
18 return CheckResult.PASSED
19 else:
20 return CheckResult.FAILED
21 else:
22 return CheckResult.PASSED
23
24
25 check = S3BucketObjectLock()
26
[end of checkov/terraform/checks/resource/aws/S3BucketObjectLock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
--- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
+++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py
@@ -1,25 +1,26 @@
+from typing import Dict, List, Any
+
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
class S3BucketObjectLock(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure that S3 bucket has lock configuration enabled by default"
id = "CKV_AWS_143"
- supported_resources = ['aws_s3_bucket']
+ supported_resources = ["aws_s3_bucket"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- if 'object_lock_configuration' in conf:
- if 'object_lock_enabled' in conf['object_lock_configuration'][0]:
- lock = conf['object_lock_configuration'][0]['object_lock_enabled']
- if lock == "Enabled":
- return CheckResult.PASSED
- else:
- return CheckResult.FAILED
- else:
- return CheckResult.PASSED
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ lock_conf = conf.get("object_lock_configuration")
+ if lock_conf and lock_conf[0]:
+ lock_enabled = lock_conf[0].get("object_lock_enabled")
+ if lock_enabled in ["Enabled", ["Enabled"]]:
+ return CheckResult.PASSED
+ return CheckResult.FAILED
+
+ return CheckResult.UNKNOWN
check = S3BucketObjectLock()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n--- a/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n+++ b/checkov/terraform/checks/resource/aws/S3BucketObjectLock.py\n@@ -1,25 +1,26 @@\n+from typing import Dict, List, Any\n+\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n \n \n class S3BucketObjectLock(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n- supported_resources = ['aws_s3_bucket']\n+ supported_resources = [\"aws_s3_bucket\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if 'object_lock_configuration' in conf:\n- if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n- lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n- if lock == \"Enabled\":\n- return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n- else:\n- return CheckResult.PASSED\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ lock_conf = conf.get(\"object_lock_configuration\")\n+ if lock_conf and lock_conf[0]:\n+ lock_enabled = lock_conf[0].get(\"object_lock_enabled\")\n+ if lock_enabled in [\"Enabled\", [\"Enabled\"]]:\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n+\n+ return CheckResult.UNKNOWN\n \n \n check = S3BucketObjectLock()\n", "issue": "False positive for CKV_AWS_143 (\"Ensure S3 bucket has lock configuration enabled by default\")\n**Describe the bug**\r\n\r\nCheckov expects argument `object_lock_configuration` to be an object, i.e.\r\n\r\n```hcl\r\nobject_lock_configuration = {\r\n object_lock_enabled = \"Enabled\"\r\n}\r\n```\r\n\r\nTerraform works with the above configuration, but when also declaring rules for the object lock configuration, it expects a block instead, e.g.\r\n```hcl\r\nobject_lock_configuration {\r\n object_lock_enabled = \"Enabled\"\r\n\r\n rule {\r\n default_retention {\r\n mode = \"GOVERNANCE\"\r\n days = 366\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nCheckov should pass for a `object_lock_configuration` argument block.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.3.1\r\n - Checkov Version: 2.0.135\r\n - Terraform version: v0.14.8\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass S3BucketObjectLock(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that S3 bucket has lock configuration enabled by default\"\n id = \"CKV_AWS_143\"\n supported_resources = ['aws_s3_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'object_lock_configuration' in conf:\n if 'object_lock_enabled' in conf['object_lock_configuration'][0]:\n lock = conf['object_lock_configuration'][0]['object_lock_enabled']\n if lock == \"Enabled\":\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.PASSED\n\n\ncheck = 
S3BucketObjectLock()\n", "path": "checkov/terraform/checks/resource/aws/S3BucketObjectLock.py"}]} | 1,028 | 439 |
gh_patches_debug_39587 | rasdani/github-patches | git_diff | ansible-collections__amazon.aws-1980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`lookup` environment variables `aws_ec2.yml` file
### Summary
I'm trying to `lookup` environment variables in the `aws_ec2.yml` file, but the `lookup` does not seem to work as expected. Is this something that is intentionally not allowed?
### Issue Type
Bug Report
### Component Name
ec2_instance
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.15.2]
config file = /Users/<user>/.ansible.cfg
configured module search path = ['/Users/<user>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/homebrew/lib/python3.11/site-packages/ansible
ansible collection location = /Users/<user>/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/homebrew/bin/ansible
python version = 3.11.6 (main, Oct 2 2023, 20:46:14) [Clang 14.0.3 (clang-1403.0.22.14.1)] (/opt/homebrew/opt/[email protected]/bin/python3.11)
jinja version = 3.1.2
libyaml = True
```
### Collection Versions
```console (paste below)
$ ansible-galaxy collection list
# /Users/<user>/.ansible/collections/ansible_collections
Collection Version
--------------------- -------
amazon.aws 6.5.0
ansible.windows 2.0.0
awx.awx 21.0.0
chocolatey.chocolatey 1.5.1
community.mysql 3.7.2
community.windows 2.0.0
lowlydba.sqlserver 0.7.0
microsoft.ad          1.4.1
```
### AWS SDK versions
```console (paste below)
$ pip show boto boto3 botocore
Name: boto3
Version: 1.28.21
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email:
License: Apache License 2.0
Location: /opt/homebrew/lib/python3.11/site-packages
Requires: botocore, jmespath, s3transfer
Required-by:
---
Name: botocore
Version: 1.31.21
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services
Author-email:
License: Apache License 2.0
Location: /opt/homebrew/lib/python3.11/site-packages
Requires: jmespath, python-dateutil, urllib3
Required-by: aws-secretsmanager-caching, boto3, s3transfer
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
CONFIG_FILE() = /Users/<user>/.ansible.cfg
PAGER(env: PAGER) = less
```
### OS / Environment
macOS Ventura Version 13.6.2 (22G320)
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
plugin: aws_ec2
assume_role_arn: "arn:aws:iam::{{ lookup('env', 'AWS_ACCOUNT_ID') | default('XXXXXXXXXXX', true) }}:role/ansible-manager"
regions:
- "{{ lookup('env', 'AWS_REGION') | default('us-east-1', true) }}"
```
I have this `yml` file, and I was expecting it would look up the env variables and use them for authorization.
### Expected Results
Expected result is that the `lookup` would happen and the assume role/region would be set correctly. Instead, it is being parsed as plain text and the `lookup` is not recognized, as far as I understand from the result below.
### Actual Results
```console (paste below)
aws_ec2.yml with auto plugin: An error occurred (AccessDenied) when calling the AssumeRole operation: User:
<UserName> is not
authorized to perform: sts:AssumeRole on resource: arn:aws:iam::{{ lookup('ansible.builtin.env', 'AWS_ACCOUNT_ID') |
default('XXXXXXXXXXX', True) }}:role/ansible-manager
aws_ec2.yml with auto plugin: Couldn't connect to AWS: Provided region_name '{{ lookup('env', 'AWS_REGION') |
default('us-east-1', true) }}' doesn't match a supported format.
```
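This would be consistent with the option handling in `plugins/plugin_utils/inventory.py` shown below: only a fixed set of option names is ever templated, which would explain why values such as `regions` and `assume_role_arn` are passed through verbatim. A simplified sketch of that logic (names taken from the listing below):

```python
TEMPLATABLE_OPTIONS = ("access_key", "secret_key", "session_token", "profile", "iam_role_name")

def get_option(options, templar, name):
    value = options.get(name)
    # "assume_role_arn" and "regions" are not in TEMPLATABLE_OPTIONS, so any
    # "{{ lookup(...) }}" expression in them is returned as the literal string.
    if value and name in TEMPLATABLE_OPTIONS and templar.is_template(value):
        return templar.template(variable=value, disable_lookups=False)
    return value
```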
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/plugin_utils/inventory.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright: (c) 2022, Ansible Project
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 try:
7 import boto3
8 import botocore
9 except ImportError:
10 pass # will be captured by imported HAS_BOTO3
11
12 from ansible.plugins.inventory import BaseInventoryPlugin
13 from ansible.plugins.inventory import Cacheable
14 from ansible.plugins.inventory import Constructable
15
16 from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
17 from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase
18 from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError
19
20
21 def _boto3_session(profile_name=None):
22 if profile_name is None:
23 return boto3.Session()
24 return boto3.session.Session(profile_name=profile_name)
25
26
27 class AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase):
28 class TemplatedOptions:
29 # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates
30 # will be templated using the loader passed to parse.
31 TEMPLATABLE_OPTIONS = (
32 "access_key",
33 "secret_key",
34 "session_token",
35 "profile",
36 "iam_role_name",
37 )
38
39 def __init__(self, templar, options):
40 self.original_options = options
41 self.templar = templar
42
43 def __getitem__(self, *args):
44 return self.original_options.__getitem__(self, *args)
45
46 def __setitem__(self, *args):
47 return self.original_options.__setitem__(self, *args)
48
49 def get(self, *args):
50 value = self.original_options.get(*args)
51 if not value:
52 return value
53 if args[0] not in self.TEMPLATABLE_OPTIONS:
54 return value
55 if not self.templar.is_template(value):
56 return value
57
58 return self.templar.template(variable=value, disable_lookups=False)
59
60 def get_options(self, *args):
61 original_options = super().get_options(*args)
62 if not self.templar:
63 return original_options
64 return self.TemplatedOptions(self.templar, original_options)
65
66 def __init__(self):
67 super().__init__()
68 self._frozen_credentials = {}
69
70 # pylint: disable=too-many-arguments
71 def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):
72 super().parse(inventory, loader, path)
73 self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)
74 self._read_config_data(path)
75 self._set_frozen_credentials()
76
77 def client(self, *args, **kwargs):
78 kw_args = dict(self._frozen_credentials)
79 kw_args.update(kwargs)
80 return super().client(*args, **kw_args)
81
82 def resource(self, *args, **kwargs):
83 kw_args = dict(self._frozen_credentials)
84 kw_args.update(kwargs)
85 return super().resource(*args, **kw_args)
86
87 def _freeze_iam_role(self, iam_role_arn):
88 if hasattr(self, "ansible_name"):
89 role_session_name = f"ansible_aws_{self.ansible_name}_dynamic_inventory"
90 else:
91 role_session_name = "ansible_aws_dynamic_inventory"
92 assume_params = {"RoleArn": iam_role_arn, "RoleSessionName": role_session_name}
93
94 try:
95 sts = self.client("sts")
96 assumed_role = sts.assume_role(**assume_params)
97 except AnsibleBotocoreError as e:
98 self.fail_aws(f"Unable to assume role {iam_role_arn}", exception=e)
99
100 credentials = assumed_role.get("Credentials")
101 if not credentials:
102 self.fail_aws(f"Unable to assume role {iam_role_arn}")
103
104 self._frozen_credentials = {
105 "profile_name": None,
106 "aws_access_key_id": credentials.get("AccessKeyId"),
107 "aws_secret_access_key": credentials.get("SecretAccessKey"),
108 "aws_session_token": credentials.get("SessionToken"),
109 }
110
111 def _set_frozen_credentials(self):
112 options = self.get_options()
113 iam_role_arn = options.get("assume_role_arn")
114 if iam_role_arn:
115 self._freeze_iam_role(iam_role_arn)
116
117 def _describe_regions(self, service):
118 # Try pulling a list of regions from the service
119 try:
120 initial_region = self.region or "us-east-1"
121 client = self.client(service, region=initial_region)
122 resp = client.describe_regions()
123 except AttributeError:
124 # Not all clients support describe
125 pass
126 except is_boto3_error_code("UnauthorizedOperation"):
127 self.warn(f"UnauthorizedOperation when trying to list {service} regions")
128 except botocore.exceptions.NoRegionError:
129 self.warn(f"NoRegionError when trying to list {service} regions")
130 except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
131 self.warn(f"Unexpected error while trying to list {service} regions: {e}")
132 else:
133 regions = [x["RegionName"] for x in resp.get("Regions", [])]
134 if regions:
135 return regions
136 return None
137
138 def _boto3_regions(self, service):
139 options = self.get_options()
140
141 if options.get("regions"):
142 return options.get("regions")
143
144 # boto3 has hard coded lists of available regions for resources, however this does bit-rot
145 # As such we try to query the service, and fall back to ec2 for a list of regions
146 for resource_type in list({service, "ec2"}):
147 regions = self._describe_regions(resource_type)
148 if regions:
149 return regions
150
151 # fallback to local list hardcoded in boto3 if still no regions
152 session = _boto3_session(options.get("profile"))
153 regions = session.get_available_regions(service)
154
155 if not regions:
156 # I give up, now you MUST give me regions
157 self.fail_aws(
158 "Unable to get regions list from available methods, you must specify the 'regions' option to continue."
159 )
160
161 return regions
162
163 def all_clients(self, service):
164 """
165 Generator that yields a boto3 client and the region
166
167 :param service: The boto3 service to connect to.
168
169 Note: For services which don't support 'DescribeRegions' this may include bad
170 endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal
171 error.
172 """
173 regions = self._boto3_regions(service=service)
174
175 for region in regions:
176 connection = self.client(service, region=region)
177 yield connection, region
178
179 def get_cached_result(self, path, cache):
180 # false when refresh_cache or --flush-cache is used
181 if not cache:
182 return False, None
183 # get the user-specified directive
184 if not self.get_option("cache"):
185 return False, None
186
187 cache_key = self.get_cache_key(path)
188 try:
189 cached_value = self._cache[cache_key]
190 except KeyError:
191 # if cache expires or cache file doesn"t exist
192 return False, None
193
194 return True, cached_value
195
196 def update_cached_result(self, path, cache, result):
197 if not self.get_option("cache"):
198 return
199
200 cache_key = self.get_cache_key(path)
201 # We weren't explicitly told to flush the cache, and there's already a cache entry,
202 # this means that the result we're being passed came from the cache. As such we don't
203 # want to "update" the cache as that could reset a TTL on the cache entry.
204 if cache and cache_key in self._cache:
205 return
206
207 self._cache[cache_key] = result
208
209 def verify_file(self, path):
210 """
211 :param path: the path to the inventory config file
212 :return the contents of the config file
213 """
214 if not super().verify_file(path):
215 return False
216
217 if hasattr(self, "INVENTORY_FILE_SUFFIXES"):
218 if not path.endswith(self.INVENTORY_FILE_SUFFIXES):
219 return False
220
221 return True
222
[end of plugins/plugin_utils/inventory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/plugin_utils/inventory.py b/plugins/plugin_utils/inventory.py
--- a/plugins/plugin_utils/inventory.py
+++ b/plugins/plugin_utils/inventory.py
@@ -33,7 +33,10 @@
"secret_key",
"session_token",
"profile",
- "iam_role_name",
+ "endpoint_url",
+ "assume_role_arn",
+ "region",
+ "regions",
)
def __init__(self, templar, options):
@@ -48,20 +51,21 @@
def get(self, *args):
value = self.original_options.get(*args)
- if not value:
- return value
- if args[0] not in self.TEMPLATABLE_OPTIONS:
- return value
- if not self.templar.is_template(value):
+ if (
+ not value
+ or not self.templar
+ or args[0] not in self.TEMPLATABLE_OPTIONS
+ or not self.templar.is_template(value)
+ ):
return value
return self.templar.template(variable=value, disable_lookups=False)
def get_options(self, *args):
- original_options = super().get_options(*args)
- if not self.templar:
- return original_options
- return self.TemplatedOptions(self.templar, original_options)
+ return self.TemplatedOptions(self.templar, super().get_options(*args))
+
+ def get_option(self, option, hostvars=None):
+ return self.TemplatedOptions(self.templar, {option: super().get_option(option, hostvars)}).get(option)
def __init__(self):
super().__init__()
@@ -109,8 +113,7 @@
}
def _set_frozen_credentials(self):
- options = self.get_options()
- iam_role_arn = options.get("assume_role_arn")
+ iam_role_arn = self.get_option("assume_role_arn")
if iam_role_arn:
self._freeze_iam_role(iam_role_arn)
@@ -136,10 +139,9 @@
return None
def _boto3_regions(self, service):
- options = self.get_options()
-
- if options.get("regions"):
- return options.get("regions")
+ regions = self.get_option("regions")
+ if regions:
+ return regions
# boto3 has hard coded lists of available regions for resources, however this does bit-rot
# As such we try to query the service, and fall back to ec2 for a list of regions
@@ -149,7 +151,7 @@
return regions
# fallback to local list hardcoded in boto3 if still no regions
- session = _boto3_session(options.get("profile"))
+ session = _boto3_session(self.get_option("profile"))
regions = session.get_available_regions(service)
if not regions:
| {"golden_diff": "diff --git a/plugins/plugin_utils/inventory.py b/plugins/plugin_utils/inventory.py\n--- a/plugins/plugin_utils/inventory.py\n+++ b/plugins/plugin_utils/inventory.py\n@@ -33,7 +33,10 @@\n \"secret_key\",\n \"session_token\",\n \"profile\",\n- \"iam_role_name\",\n+ \"endpoint_url\",\n+ \"assume_role_arn\",\n+ \"region\",\n+ \"regions\",\n )\n \n def __init__(self, templar, options):\n@@ -48,20 +51,21 @@\n \n def get(self, *args):\n value = self.original_options.get(*args)\n- if not value:\n- return value\n- if args[0] not in self.TEMPLATABLE_OPTIONS:\n- return value\n- if not self.templar.is_template(value):\n+ if (\n+ not value\n+ or not self.templar\n+ or args[0] not in self.TEMPLATABLE_OPTIONS\n+ or not self.templar.is_template(value)\n+ ):\n return value\n \n return self.templar.template(variable=value, disable_lookups=False)\n \n def get_options(self, *args):\n- original_options = super().get_options(*args)\n- if not self.templar:\n- return original_options\n- return self.TemplatedOptions(self.templar, original_options)\n+ return self.TemplatedOptions(self.templar, super().get_options(*args))\n+\n+ def get_option(self, option, hostvars=None):\n+ return self.TemplatedOptions(self.templar, {option: super().get_option(option, hostvars)}).get(option)\n \n def __init__(self):\n super().__init__()\n@@ -109,8 +113,7 @@\n }\n \n def _set_frozen_credentials(self):\n- options = self.get_options()\n- iam_role_arn = options.get(\"assume_role_arn\")\n+ iam_role_arn = self.get_option(\"assume_role_arn\")\n if iam_role_arn:\n self._freeze_iam_role(iam_role_arn)\n \n@@ -136,10 +139,9 @@\n return None\n \n def _boto3_regions(self, service):\n- options = self.get_options()\n-\n- if options.get(\"regions\"):\n- return options.get(\"regions\")\n+ regions = self.get_option(\"regions\")\n+ if regions:\n+ return regions\n \n # boto3 has hard coded lists of available regions for resources, however this does bit-rot\n # As such we try to query the service, and fall back to ec2 for a list of regions\n@@ -149,7 +151,7 @@\n return regions\n \n # fallback to local list hardcoded in boto3 if still no regions\n- session = _boto3_session(options.get(\"profile\"))\n+ session = _boto3_session(self.get_option(\"profile\"))\n regions = session.get_available_regions(service)\n \n if not regions:\n", "issue": "`lookup` environment variables `aws_ec2.yml` file\n### Summary\n\nI'm trying to `lookup` environment variables in `aws_ec2.yml` file but the `lookup` seems to be not working as expected, is this something intentionally not allowed? 
\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nec2_instnace\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.2]\r\n config file = /Users/<user>/.ansible.cfg\r\n configured module search path = ['/Users/<user>/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/<user>/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.6 (main, Oct 2 2023, 20:46:14) [Clang 14.0.3 (clang-1403.0.22.14.1)] (/opt/homebrew/opt/[email protected]/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\n$ ansible-galaxy collection list\r\n\r\n# /Users/<user>/.ansible/collections/ansible_collections\r\nCollection Version\r\n--------------------- -------\r\namazon.aws 6.5.0 \r\nansible.windows 2.0.0 \r\nawx.awx 21.0.0 \r\nchocolatey.chocolatey 1.5.1 \r\ncommunity.mysql 3.7.2 \r\ncommunity.windows 2.0.0 \r\nlowlydba.sqlserver 0.7.0 \r\nmicrosoft.ad 1.4.1 ****\r\n```\r\n\n\n### AWS SDK versions\n\n```console (paste below)\r\n$ pip show boto boto3 botocore\r\nName: boto3\r\nVersion: 1.28.21\r\nSummary: The AWS SDK for Python\r\nHome-page: https://github.com/boto/boto3\r\nAuthor: Amazon Web Services\r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /opt/homebrew/lib/python3.11/site-packages\r\nRequires: botocore, jmespath, s3transfer\r\nRequired-by: \r\n---\r\nName: botocore\r\nVersion: 1.31.21\r\nSummary: Low-level, data-driven core of boto 3.\r\nHome-page: https://github.com/boto/botocore\r\nAuthor: Amazon Web Services\r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /opt/homebrew/lib/python3.11/site-packages\r\nRequires: jmespath, python-dateutil, urllib3\r\nRequired-by: aws-secretsmanager-caching, boto3, **s3transfer**\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nCONFIG_FILE() = /Users/<user>/.ansible.cfg\r\nPAGER(env: PAGER) = less\r\n```\r\n\n\n### OS / Environment\n\nmacOS Ventura Version 13.6.2 (22G320)\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\nplugin: aws_ec2\r\nassume_role_arn: \"arn:aws:iam::{{ lookup('env', 'AWS_ACCOUNT_ID') | default('XXXXXXXXXXX', true) }}:role/ansible-manager\"\r\nregions:\r\n - \"{{ lookup('env', 'AWS_REGION') | default('us-east-1', true) }}\"\r\n```\r\n\r\ni have this `yml` file which is something i was expecting it would lookup the env variables and get the authorization \n\n### Expected Results\n\nExpected result that `lookup` would happen and assume role/region would get set right, instead it is being parsed as plain text and `lookup` is not being recognized is what i understand looking at the result below\r\n\n\n### Actual Results\n\n```console (paste below)\r\naws_ec2.yml with auto plugin: An error occurred (AccessDenied) when calling the AssumeRole operation: User:\r\n<UserName> is not\r\nauthorized to perform: sts:AssumeRole on resource: arn:aws:iam::{{ lookup('ansible.builtin.env', 'AWS_ACCOUNT_ID') |\r\ndefault('XXXXXXXXXXX', True) }}:role/ansible-manager\r\n\r\naws_ec2.yml with auto plugin: Couldn't connect to AWS: Provided region_name '{{ lookup('env', 'AWS_REGION') |\r\ndefault('us-east-1', true) }}' doesn't match a supported format.\r\n```\r\n\n\n### Code of 
Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2022, Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\ntry:\n import boto3\n import botocore\nexcept ImportError:\n pass # will be captured by imported HAS_BOTO3\n\nfrom ansible.plugins.inventory import BaseInventoryPlugin\nfrom ansible.plugins.inventory import Cacheable\nfrom ansible.plugins.inventory import Constructable\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase\nfrom ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError\n\n\ndef _boto3_session(profile_name=None):\n if profile_name is None:\n return boto3.Session()\n return boto3.session.Session(profile_name=profile_name)\n\n\nclass AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase):\n class TemplatedOptions:\n # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates\n # will be templated using the loader passed to parse.\n TEMPLATABLE_OPTIONS = (\n \"access_key\",\n \"secret_key\",\n \"session_token\",\n \"profile\",\n \"iam_role_name\",\n )\n\n def __init__(self, templar, options):\n self.original_options = options\n self.templar = templar\n\n def __getitem__(self, *args):\n return self.original_options.__getitem__(self, *args)\n\n def __setitem__(self, *args):\n return self.original_options.__setitem__(self, *args)\n\n def get(self, *args):\n value = self.original_options.get(*args)\n if not value:\n return value\n if args[0] not in self.TEMPLATABLE_OPTIONS:\n return value\n if not self.templar.is_template(value):\n return value\n\n return self.templar.template(variable=value, disable_lookups=False)\n\n def get_options(self, *args):\n original_options = super().get_options(*args)\n if not self.templar:\n return original_options\n return self.TemplatedOptions(self.templar, original_options)\n\n def __init__(self):\n super().__init__()\n self._frozen_credentials = {}\n\n # pylint: disable=too-many-arguments\n def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):\n super().parse(inventory, loader, path)\n self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)\n self._read_config_data(path)\n self._set_frozen_credentials()\n\n def client(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().client(*args, **kw_args)\n\n def resource(self, *args, **kwargs):\n kw_args = dict(self._frozen_credentials)\n kw_args.update(kwargs)\n return super().resource(*args, **kw_args)\n\n def _freeze_iam_role(self, iam_role_arn):\n if hasattr(self, \"ansible_name\"):\n role_session_name = f\"ansible_aws_{self.ansible_name}_dynamic_inventory\"\n else:\n role_session_name = \"ansible_aws_dynamic_inventory\"\n assume_params = {\"RoleArn\": iam_role_arn, \"RoleSessionName\": role_session_name}\n\n try:\n sts = self.client(\"sts\")\n assumed_role = sts.assume_role(**assume_params)\n except AnsibleBotocoreError as e:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\", exception=e)\n\n credentials = assumed_role.get(\"Credentials\")\n if not credentials:\n self.fail_aws(f\"Unable to assume role {iam_role_arn}\")\n\n self._frozen_credentials = {\n \"profile_name\": None,\n \"aws_access_key_id\": 
credentials.get(\"AccessKeyId\"),\n \"aws_secret_access_key\": credentials.get(\"SecretAccessKey\"),\n \"aws_session_token\": credentials.get(\"SessionToken\"),\n }\n\n def _set_frozen_credentials(self):\n options = self.get_options()\n iam_role_arn = options.get(\"assume_role_arn\")\n if iam_role_arn:\n self._freeze_iam_role(iam_role_arn)\n\n def _describe_regions(self, service):\n # Try pulling a list of regions from the service\n try:\n initial_region = self.region or \"us-east-1\"\n client = self.client(service, region=initial_region)\n resp = client.describe_regions()\n except AttributeError:\n # Not all clients support describe\n pass\n except is_boto3_error_code(\"UnauthorizedOperation\"):\n self.warn(f\"UnauthorizedOperation when trying to list {service} regions\")\n except botocore.exceptions.NoRegionError:\n self.warn(f\"NoRegionError when trying to list {service} regions\")\n except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:\n self.warn(f\"Unexpected error while trying to list {service} regions: {e}\")\n else:\n regions = [x[\"RegionName\"] for x in resp.get(\"Regions\", [])]\n if regions:\n return regions\n return None\n\n def _boto3_regions(self, service):\n options = self.get_options()\n\n if options.get(\"regions\"):\n return options.get(\"regions\")\n\n # boto3 has hard coded lists of available regions for resources, however this does bit-rot\n # As such we try to query the service, and fall back to ec2 for a list of regions\n for resource_type in list({service, \"ec2\"}):\n regions = self._describe_regions(resource_type)\n if regions:\n return regions\n\n # fallback to local list hardcoded in boto3 if still no regions\n session = _boto3_session(options.get(\"profile\"))\n regions = session.get_available_regions(service)\n\n if not regions:\n # I give up, now you MUST give me regions\n self.fail_aws(\n \"Unable to get regions list from available methods, you must specify the 'regions' option to continue.\"\n )\n\n return regions\n\n def all_clients(self, service):\n \"\"\"\n Generator that yields a boto3 client and the region\n\n :param service: The boto3 service to connect to.\n\n Note: For services which don't support 'DescribeRegions' this may include bad\n endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal\n error.\n \"\"\"\n regions = self._boto3_regions(service=service)\n\n for region in regions:\n connection = self.client(service, region=region)\n yield connection, region\n\n def get_cached_result(self, path, cache):\n # false when refresh_cache or --flush-cache is used\n if not cache:\n return False, None\n # get the user-specified directive\n if not self.get_option(\"cache\"):\n return False, None\n\n cache_key = self.get_cache_key(path)\n try:\n cached_value = self._cache[cache_key]\n except KeyError:\n # if cache expires or cache file doesn\"t exist\n return False, None\n\n return True, cached_value\n\n def update_cached_result(self, path, cache, result):\n if not self.get_option(\"cache\"):\n return\n\n cache_key = self.get_cache_key(path)\n # We weren't explicitly told to flush the cache, and there's already a cache entry,\n # this means that the result we're being passed came from the cache. 
As such we don't\n # want to \"update\" the cache as that could reset a TTL on the cache entry.\n if cache and cache_key in self._cache:\n return\n\n self._cache[cache_key] = result\n\n def verify_file(self, path):\n \"\"\"\n :param path: the path to the inventory config file\n :return the contents of the config file\n \"\"\"\n if not super().verify_file(path):\n return False\n\n if hasattr(self, \"INVENTORY_FILE_SUFFIXES\"):\n if not path.endswith(self.INVENTORY_FILE_SUFFIXES):\n return False\n\n return True\n", "path": "plugins/plugin_utils/inventory.py"}]} | 3,944 | 665 |
gh_patches_debug_15777 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests missing for `User.mention_markdown` and `User.mention_html`
And while we're at it, maybe `helpers.mention_markdown` / `helpers.mention_html` too.
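Purely as an illustration of what the missing coverage could look like — a minimal pytest-style sketch for the two helpers (not the project's actual test suite; the expected strings simply mirror the helper implementations quoted below):
```python
# Hypothetical tests; expected behaviour taken from telegram/utils/helpers.py below.
from telegram.utils.helpers import mention_html, mention_markdown

def test_mention_html():
    # html.escape() is applied to the display name
    assert mention_html(1, 'the<name>') == '<a href="tg://user?id=1">the<name></a>'

def test_mention_markdown():
    # markdown control characters in the name are escaped
    assert mention_markdown(1, 'the_name') == '[the\\_name](tg://user?id=1)'
```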
</issue>
<code>
[start of telegram/utils/helpers.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains helper functions."""
20 from html import escape
21
22 import re
23 import signal
24 from datetime import datetime
25
26 # From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
27 _signames = {v: k
28 for k, v in reversed(sorted(vars(signal).items()))
29 if k.startswith('SIG') and not k.startswith('SIG_')}
30
31
32 def get_signal_name(signum):
33 """Returns the signal name of the given signal number."""
34 return _signames[signum]
35
36
37 # Not using future.backports.datetime here as datetime value might be an input from the user,
38 # making every isinstace() call more delicate. So we just use our own compat layer.
39 if hasattr(datetime, 'timestamp'):
40 # Python 3.3+
41 def _timestamp(dt_obj):
42 return dt_obj.timestamp()
43 else:
44 # Python < 3.3 (incl 2.7)
45 from time import mktime
46
47 def _timestamp(dt_obj):
48 return mktime(dt_obj.timetuple())
49
50
51 def escape_markdown(text):
52 """Helper function to escape telegram markup symbols."""
53 escape_chars = '\*_`\['
54 return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
55
56
57 def to_timestamp(dt_obj):
58 """
59 Args:
60 dt_obj (:class:`datetime.datetime`):
61
62 Returns:
63 int:
64
65 """
66 if not dt_obj:
67 return None
68
69 return int(_timestamp(dt_obj))
70
71
72 def from_timestamp(unixtime):
73 """
74 Args:
75 unixtime (int):
76
77 Returns:
78 datetime.datetime:
79
80 """
81 if not unixtime:
82 return None
83
84 return datetime.fromtimestamp(unixtime)
85
86
87 def mention_html(user_id, name):
88 """
89 Args:
90 user_id (:obj:`int`) The user's id which you want to mention.
91 name (:obj:`str`) The name the mention is showing.
92
93 Returns:
94 :obj:`str`: The inline mention for the user as html.
95 """
96 if isinstance(user_id, int):
97 return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
98
99
100 def mention_markdown(user_id, name):
101 """
102 Args:
103 user_id (:obj:`int`) The user's id which you want to mention.
104 name (:obj:`str`) The name the mention is showing.
105
106 Returns:
107 :obj:`str`: The inline mention for the user as markdown.
108 """
109 if isinstance(user_id, int):
110 return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
111
112
113 def effective_message_type(entity):
114 """
115 Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
116 :class:`telegram.Update`.
117
118 Args:
119 entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from
120
121 Returns:
122 str: One of ``Message.MESSAGE_TYPES``
123
124 """
125
126 # Importing on file-level yields cyclic Import Errors
127 from telegram import Message
128 from telegram import Update
129
130 if isinstance(entity, Message):
131 message = entity
132 elif isinstance(entity, Update):
133 message = entity.effective_message
134 else:
135 raise TypeError("entity is not Message or Update (got: {})".format(type(entity)))
136
137 for i in Message.MESSAGE_TYPES:
138 if getattr(message, i, None):
139 return i
140
141 return None
142
[end of telegram/utils/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py
--- a/telegram/utils/helpers.py
+++ b/telegram/utils/helpers.py
@@ -94,7 +94,7 @@
:obj:`str`: The inline mention for the user as html.
"""
if isinstance(user_id, int):
- return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
+ return u'<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
def mention_markdown(user_id, name):
@@ -107,7 +107,7 @@
:obj:`str`: The inline mention for the user as markdown.
"""
if isinstance(user_id, int):
- return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
+ return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)
def effective_message_type(entity):
| {"golden_diff": "diff --git a/telegram/utils/helpers.py b/telegram/utils/helpers.py\n--- a/telegram/utils/helpers.py\n+++ b/telegram/utils/helpers.py\n@@ -94,7 +94,7 @@\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n- return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n+ return u'<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n \n \n def mention_markdown(user_id, name):\n@@ -107,7 +107,7 @@\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n- return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n+ return u'[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n \n \n def effective_message_type(entity):\n", "issue": "Tests missing for `User.mention_markdown` and `User.mention_html`\nAnd while we're at it. Maybe `helpers.mention_markdown/html` too.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions.\"\"\"\nfrom html import escape\n\nimport re\nimport signal\nfrom datetime import datetime\n\n# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\n_signames = {v: k\n for k, v in reversed(sorted(vars(signal).items()))\n if k.startswith('SIG') and not k.startswith('SIG_')}\n\n\ndef get_signal_name(signum):\n \"\"\"Returns the signal name of the given signal number.\"\"\"\n return _signames[signum]\n\n\n# Not using future.backports.datetime here as datetime value might be an input from the user,\n# making every isinstace() call more delicate. 
So we just use our own compat layer.\nif hasattr(datetime, 'timestamp'):\n # Python 3.3+\n def _timestamp(dt_obj):\n return dt_obj.timestamp()\nelse:\n # Python < 3.3 (incl 2.7)\n from time import mktime\n\n def _timestamp(dt_obj):\n return mktime(dt_obj.timetuple())\n\n\ndef escape_markdown(text):\n \"\"\"Helper function to escape telegram markup symbols.\"\"\"\n escape_chars = '\\*_`\\['\n return re.sub(r'([%s])' % escape_chars, r'\\\\\\1', text)\n\n\ndef to_timestamp(dt_obj):\n \"\"\"\n Args:\n dt_obj (:class:`datetime.datetime`):\n\n Returns:\n int:\n\n \"\"\"\n if not dt_obj:\n return None\n\n return int(_timestamp(dt_obj))\n\n\ndef from_timestamp(unixtime):\n \"\"\"\n Args:\n unixtime (int):\n\n Returns:\n datetime.datetime:\n\n \"\"\"\n if not unixtime:\n return None\n\n return datetime.fromtimestamp(unixtime)\n\n\ndef mention_html(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as html.\n \"\"\"\n if isinstance(user_id, int):\n return '<a href=\"tg://user?id={}\">{}</a>'.format(user_id, escape(name))\n\n\ndef mention_markdown(user_id, name):\n \"\"\"\n Args:\n user_id (:obj:`int`) The user's id which you want to mention.\n name (:obj:`str`) The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if isinstance(user_id, int):\n return '[{}](tg://user?id={})'.format(escape_markdown(name), user_id)\n\n\ndef effective_message_type(entity):\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from\n\n Returns:\n str: One of ``Message.MESSAGE_TYPES``\n\n \"\"\"\n\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message\n from telegram import Update\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n message = entity.effective_message\n else:\n raise TypeError(\"entity is not Message or Update (got: {})\".format(type(entity)))\n\n for i in Message.MESSAGE_TYPES:\n if getattr(message, i, None):\n return i\n\n return None\n", "path": "telegram/utils/helpers.py"}]} | 1,847 | 219 |
gh_patches_debug_2678 | rasdani/github-patches | git_diff | pretalx__pretalx-381 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
installation crashes when there are no config files
## Current Behavior
```
$ cd pretalx
$ pip-3.6 install . --user
(...)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/tmp/pip-xa87l9tk-build/pretalx/settings.py", line 460, in <module>
plugins=PLUGINS
File "/tmp/pip-xa87l9tk-build/pretalx/common/settings/utils.py", line 11, in log_initial
(f'Read from: {", ".join(config_files)}', False),
TypeError: can only join an iterable
```
If there are no config files at all, the installation crashes because `config_files` is `None`.
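A minimal repro of the failing expression and the obvious guard, sketched for illustration (this is not pretalx code, just the `join` call from the traceback):
```python
config_files = None  # what the config-file reading step yields when nothing is read
try:
    ", ".join(config_files)
except TypeError as exc:
    print(exc)  # "can only join an iterable"

print("Read from: " + ", ".join(config_files or []))  # defensive default -> "Read from: "
```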
## Your Environment
* Version used: master
* Operating System and version (desktop or mobile): FreeBSD
</issue>
<code>
[start of src/pretalx/common/settings/config.py]
1 import configparser
2 import os
3 import sys
4
5 from pretalx.common.settings.utils import reduce_dict
6
7 CONFIG = {
8 'filesystem': {
9 'base': {
10 'default': os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
11 },
12 'logs': {
13 'default': None,
14 'env': os.getenv('PRETALX_FILESYSTEM_LOGS'),
15 },
16 'media': {
17 'default': None,
18 'env': os.getenv('PRETALX_FILESYSTEM_MEDIA'),
19 },
20 'static': {
21 'default': None,
22 'env': os.getenv('PRETALX_FILESYSTEM_STATIC'),
23 },
24 },
25 'site': {
26 'debug': {
27 'default': 'runserver' in sys.argv,
28 'env': os.getenv('PRETALX_DEBUG'),
29 },
30 'url': {
31 'default': 'http://localhost',
32 'env': os.getenv('PRETALX_SITE_URL'),
33 },
34 'https': {
35 'env': os.getenv('PRETALX_HTTPS'),
36 },
37 'cookie_domain': {
38 'default': '',
39 'env': os.getenv('PRETALX_COOKIE_DOMAIN'),
40 },
41 },
42 'database': {
43 'backend': {
44 'default': 'sqlite3',
45 'env': os.getenv('PRETALX_DB_TYPE'),
46 },
47 'name': {
48 'env': os.getenv('PRETALX_DB_NAME'),
49 },
50 'user': {
51 'default': '',
52 'env': os.getenv('PRETALX_DB_USER'),
53 },
54 'password': {
55 'default': '',
56 'env': os.getenv('PRETALX_DB_PASS'),
57 },
58 'host': {
59 'default': '',
60 'env': os.getenv('PRETALX_DB_HOST'),
61 },
62 'port': {
63 'default': '',
64 'env': os.getenv('PRETALX_DB_PORT'),
65 },
66 },
67 'mail': {
68 'from': {
69 'default': 'admin@localhost',
70 'env': os.getenv('PRETALX_MAIL_FROM'),
71 },
72 'host': {
73 'default': 'localhost',
74 'env': os.getenv('PRETALX_MAIL_HOST'),
75 },
76 'port': {
77 'default': '25',
78 'env': os.getenv('PRETALX_MAIL_PORT'),
79 },
80 'user': {
81 'default': '',
82 'env': os.getenv('PRETALX_MAIL_USER'),
83 },
84 'password': {
85 'default': '',
86 'env': os.getenv('PRETALX_MAIL_PASSWORD'),
87 },
88 'tls': {
89 'default': 'False',
90 'env': os.getenv('PRETALX_MAIL_TLS'),
91 },
92 'ssl': {
93 'default': 'False',
94 'env': os.getenv('PRETALX_MAIL_SSL'),
95 },
96 },
97 'cache': {
98 },
99 'celery': {
100 'broker': {
101 'default': '',
102 'env': os.getenv('PRETALX_CELERY_BROKER'),
103 },
104 'backend': {
105 'default': '',
106 'env': os.getenv('PRETALX_CELERY_BACKEND'),
107 },
108 },
109 'logging': {
110 'email': {
111 'default': '',
112 'env': os.getenv('PRETALX_LOGGING_EMAIL'),
113 },
114 'email_level': {
115 'default': '',
116 'env': os.getenv('PRETALX_LOGGING_EMAIL_LEVEL'),
117 },
118 },
119 }
120
121
122 def read_config_files(config):
123 if 'PRETALX_CONFIG_FILE' in os.environ:
124 config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))
125 else:
126 config_files = config.read([
127 '/etc/pretalx/pretalx.cfg',
128 os.path.expanduser('~/.pretalx.cfg'),
129 'pretalx.cfg',
130 ], encoding='utf-8')
131 return config, config_files
132
133
134 def read_layer(layer_name, config):
135 config_dict = reduce_dict({
136 section_name: {
137 key: value.get(layer_name)
138 for key, value in section_content.items()
139 }
140 for section_name, section_content in CONFIG.items()
141 })
142 config.read_dict(config_dict)
143 return config
144
145
146 def build_config():
147 config = configparser.RawConfigParser()
148 config = read_layer('default', config)
149 config, config_files = read_config_files(config)
150 config = read_layer('env', config)
151 return config, config_files
152
[end of src/pretalx/common/settings/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretalx/common/settings/config.py b/src/pretalx/common/settings/config.py
--- a/src/pretalx/common/settings/config.py
+++ b/src/pretalx/common/settings/config.py
@@ -128,7 +128,7 @@
os.path.expanduser('~/.pretalx.cfg'),
'pretalx.cfg',
], encoding='utf-8')
- return config, config_files
+ return config, config_files or [] # .read() returns None, if there are no config files
def read_layer(layer_name, config):
| {"golden_diff": "diff --git a/src/pretalx/common/settings/config.py b/src/pretalx/common/settings/config.py\n--- a/src/pretalx/common/settings/config.py\n+++ b/src/pretalx/common/settings/config.py\n@@ -128,7 +128,7 @@\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n- return config, config_files\n+ return config, config_files or [] # .read() returns None, if there are no config files\n \n \n def read_layer(layer_name, config):\n", "issue": "installation crashes when there are no config files\n## Current Behavior\r\n```\r\n$ cd pretalx\r\n$ pip-3.6 install . --user\r\n(...)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/tmp/pip-xa87l9tk-build/pretalx/settings.py\", line 460, in <module>\r\n plugins=PLUGINS\r\n File \"/tmp/pip-xa87l9tk-build/pretalx/common/settings/utils.py\", line 11, in log_initial\r\n (f'Read from: {\", \".join(config_files)}', False),\r\n TypeError: can only join an iterable\r\n```\r\n\r\nif there are no config files at all, the installation crashes, because `config_files` is `None`.\r\n\r\n## Your Environment\r\n\r\n* Version used: master\r\n* Operating System and version (desktop or mobile): FreeBSD\r\n\n", "before_files": [{"content": "import configparser\nimport os\nimport sys\n\nfrom pretalx.common.settings.utils import reduce_dict\n\nCONFIG = {\n 'filesystem': {\n 'base': {\n 'default': os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),\n },\n 'logs': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_LOGS'),\n },\n 'media': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_MEDIA'),\n },\n 'static': {\n 'default': None,\n 'env': os.getenv('PRETALX_FILESYSTEM_STATIC'),\n },\n },\n 'site': {\n 'debug': {\n 'default': 'runserver' in sys.argv,\n 'env': os.getenv('PRETALX_DEBUG'),\n },\n 'url': {\n 'default': 'http://localhost',\n 'env': os.getenv('PRETALX_SITE_URL'),\n },\n 'https': {\n 'env': os.getenv('PRETALX_HTTPS'),\n },\n 'cookie_domain': {\n 'default': '',\n 'env': os.getenv('PRETALX_COOKIE_DOMAIN'),\n },\n },\n 'database': {\n 'backend': {\n 'default': 'sqlite3',\n 'env': os.getenv('PRETALX_DB_TYPE'),\n },\n 'name': {\n 'env': os.getenv('PRETALX_DB_NAME'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PASS'),\n },\n 'host': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_HOST'),\n },\n 'port': {\n 'default': '',\n 'env': os.getenv('PRETALX_DB_PORT'),\n },\n },\n 'mail': {\n 'from': {\n 'default': 'admin@localhost',\n 'env': os.getenv('PRETALX_MAIL_FROM'),\n },\n 'host': {\n 'default': 'localhost',\n 'env': os.getenv('PRETALX_MAIL_HOST'),\n },\n 'port': {\n 'default': '25',\n 'env': os.getenv('PRETALX_MAIL_PORT'),\n },\n 'user': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_USER'),\n },\n 'password': {\n 'default': '',\n 'env': os.getenv('PRETALX_MAIL_PASSWORD'),\n },\n 'tls': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_TLS'),\n },\n 'ssl': {\n 'default': 'False',\n 'env': os.getenv('PRETALX_MAIL_SSL'),\n },\n },\n 'cache': {\n },\n 'celery': {\n 'broker': 
{\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BROKER'),\n },\n 'backend': {\n 'default': '',\n 'env': os.getenv('PRETALX_CELERY_BACKEND'),\n },\n },\n 'logging': {\n 'email': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL'),\n },\n 'email_level': {\n 'default': '',\n 'env': os.getenv('PRETALX_LOGGING_EMAIL_LEVEL'),\n },\n },\n}\n\n\ndef read_config_files(config):\n if 'PRETALX_CONFIG_FILE' in os.environ:\n config_files = config.read_file(open(os.environ.get('PRETALX_CONFIG_FILE'), encoding='utf-8'))\n else:\n config_files = config.read([\n '/etc/pretalx/pretalx.cfg',\n os.path.expanduser('~/.pretalx.cfg'),\n 'pretalx.cfg',\n ], encoding='utf-8')\n return config, config_files\n\n\ndef read_layer(layer_name, config):\n config_dict = reduce_dict({\n section_name: {\n key: value.get(layer_name)\n for key, value in section_content.items()\n }\n for section_name, section_content in CONFIG.items()\n })\n config.read_dict(config_dict)\n return config\n\n\ndef build_config():\n config = configparser.RawConfigParser()\n config = read_layer('default', config)\n config, config_files = read_config_files(config)\n config = read_layer('env', config)\n return config, config_files\n", "path": "src/pretalx/common/settings/config.py"}]} | 2,184 | 130 |
gh_patches_debug_29983 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing interactive event: remove + on call to action
**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/
**user:** unregistered user
**expected behaviour:** buttons should be the same all over the platform
**behaviour:** there is a + on the button
**important screensize:**
**device & browser:**
**Comment/Question:** please take out the + before "add question"
Screenshot?
<img width="692" alt="Bildschirmfoto 2020-09-22 um 17 51 38" src="https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png">
</issue>
<code>
[start of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
1 from django import template
2
3 from adhocracy4.comments.models import Comment
4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal
5 from meinberlin.apps.ideas.models import Idea
6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
7 from meinberlin.apps.mapideas.models import MapIdea
8 from meinberlin.apps.polls.models import Vote
9 from meinberlin.apps.projects import get_project_type
10
11 register = template.Library()
12
13
14 @register.filter
15 def project_url(project):
16 if (project.project_type == 'meinberlin_bplan.Bplan'
17 or project.project_type ==
18 'meinberlin_extprojects.ExternalProject'):
19 return project.externalproject.url
20 return project.get_absolute_url()
21
22
23 @register.filter
24 def project_type(project):
25 return get_project_type(project)
26
27
28 @register.filter
29 def is_external(project):
30 return (project.project_type == 'meinberlin_bplan.Bplan'
31 or project.project_type ==
32 'meinberlin_extprojects.ExternalProject')
33
34
35 @register.filter
36 def is_a4_project(project):
37 return (project.project_type == 'a4projects.Project')
38
39
40 @register.simple_tag
41 def get_num_entries(module):
42 """Count all user-generated items."""
43 item_count = \
44 Idea.objects.filter(module=module).count() \
45 + MapIdea.objects.filter(module=module).count() \
46 + budget_proposal.objects.filter(module=module).count() \
47 + kiezkasse_proposal.objects.filter(module=module).count() \
48 + Comment.objects.filter(idea__module=module).count() \
49 + Comment.objects.filter(mapidea__module=module).count() \
50 + Comment.objects.filter(budget_proposal__module=module).count() \
51 + Comment.objects.filter(kiezkasse_proposal__module=module).count() \
52 + Comment.objects.filter(topic__module=module).count() \
53 + Comment.objects.filter(maptopic__module=module).count() \
54 + Comment.objects.filter(paragraph__chapter__module=module).count() \
55 + Comment.objects.filter(chapter__module=module).count() \
56 + Comment.objects.filter(poll__module=module).count() \
57 + Vote.objects.filter(choice__question__poll__module=module).count()
58 return item_count
59
[end of meinberlin/apps/projects/templatetags/meinberlin_project_tags.py]
[start of meinberlin/apps/livequestions/phases.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 from adhocracy4 import phases
4
5 from . import apps
6 from . import models
7 from . import views
8
9
10 class IssuePhase(phases.PhaseContent):
11 app = apps.Config.label
12 phase = 'issue'
13 view = views.LiveQuestionModuleDetail
14
15 name = _('Issue phase')
16 description = _('Add question.')
17 module_name = _('Interactive Event')
18 icon = 'lightbulb-o'
19
20 features = {
21 'crud': (models.LiveQuestion,),
22 'like': (models.LiveQuestion,)
23 }
24
25
26 phases.content.register(IssuePhase())
27
[end of meinberlin/apps/livequestions/phases.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py
--- a/meinberlin/apps/livequestions/phases.py
+++ b/meinberlin/apps/livequestions/phases.py
@@ -13,7 +13,7 @@
view = views.LiveQuestionModuleDetail
name = _('Issue phase')
- description = _('Add question.')
+ description = _('Add questions and support.')
module_name = _('Interactive Event')
icon = 'lightbulb-o'
diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
@@ -4,6 +4,8 @@
from meinberlin.apps.budgeting.models import Proposal as budget_proposal
from meinberlin.apps.ideas.models import Idea
from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
+from meinberlin.apps.likes.models import Like
+from meinberlin.apps.livequestions.models import LiveQuestion
from meinberlin.apps.mapideas.models import MapIdea
from meinberlin.apps.polls.models import Vote
from meinberlin.apps.projects import get_project_type
@@ -54,5 +56,7 @@
+ Comment.objects.filter(paragraph__chapter__module=module).count() \
+ Comment.objects.filter(chapter__module=module).count() \
+ Comment.objects.filter(poll__module=module).count() \
- + Vote.objects.filter(choice__question__poll__module=module).count()
+ + Vote.objects.filter(choice__question__poll__module=module).count() \
+ + LiveQuestion.objects.filter(module=module).count() \
+ + Like.objects.filter(question__module=module).count()
return item_count
| {"golden_diff": "diff --git a/meinberlin/apps/livequestions/phases.py b/meinberlin/apps/livequestions/phases.py\n--- a/meinberlin/apps/livequestions/phases.py\n+++ b/meinberlin/apps/livequestions/phases.py\n@@ -13,7 +13,7 @@\n view = views.LiveQuestionModuleDetail\n \n name = _('Issue phase')\n- description = _('Add question.')\n+ description = _('Add questions and support.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n \ndiff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -4,6 +4,8 @@\n from meinberlin.apps.budgeting.models import Proposal as budget_proposal\n from meinberlin.apps.ideas.models import Idea\n from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\n+from meinberlin.apps.likes.models import Like\n+from meinberlin.apps.livequestions.models import LiveQuestion\n from meinberlin.apps.mapideas.models import MapIdea\n from meinberlin.apps.polls.models import Vote\n from meinberlin.apps.projects import get_project_type\n@@ -54,5 +56,7 @@\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n- + Vote.objects.filter(choice__question__poll__module=module).count()\n+ + Vote.objects.filter(choice__question__poll__module=module).count() \\\n+ + LiveQuestion.objects.filter(module=module).count() \\\n+ + Like.objects.filter(question__module=module).count()\n return item_count\n", "issue": "testing interactive event: remove + on call to action\n**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung-2/\r\n**user:** unregistered user\r\n**expected behaviour:** buttons should be same all over platform\r\n**behaviour:** there is a + on the button\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** please take out the + before add question\r\n\r\n\r\nScreenshot?\r\n<img width=\"692\" alt=\"Bildschirmfoto 2020-09-22 um 17 51 38\" src=\"https://user-images.githubusercontent.com/35491681/93906276-494d9200-fcfc-11ea-9614-3a9359b5ec97.png\">\r\n\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n if (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject'):\n return project.externalproject.url\n return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n return (project.project_type == 'meinberlin_bplan.Bplan'\n or project.project_type ==\n 'meinberlin_extprojects.ExternalProject')\n\n\[email protected]\ndef is_a4_project(project):\n return (project.project_type == 'a4projects.Project')\n\n\[email protected]_tag\ndef get_num_entries(module):\n \"\"\"Count all 
user-generated items.\"\"\"\n item_count = \\\n Idea.objects.filter(module=module).count() \\\n + MapIdea.objects.filter(module=module).count() \\\n + budget_proposal.objects.filter(module=module).count() \\\n + kiezkasse_proposal.objects.filter(module=module).count() \\\n + Comment.objects.filter(idea__module=module).count() \\\n + Comment.objects.filter(mapidea__module=module).count() \\\n + Comment.objects.filter(budget_proposal__module=module).count() \\\n + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n + Comment.objects.filter(topic__module=module).count() \\\n + Comment.objects.filter(maptopic__module=module).count() \\\n + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n + Comment.objects.filter(chapter__module=module).count() \\\n + Comment.objects.filter(poll__module=module).count() \\\n + Vote.objects.filter(choice__question__poll__module=module).count()\n return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import phases\n\nfrom . import apps\nfrom . import models\nfrom . import views\n\n\nclass IssuePhase(phases.PhaseContent):\n app = apps.Config.label\n phase = 'issue'\n view = views.LiveQuestionModuleDetail\n\n name = _('Issue phase')\n description = _('Add question.')\n module_name = _('Interactive Event')\n icon = 'lightbulb-o'\n\n features = {\n 'crud': (models.LiveQuestion,),\n 'like': (models.LiveQuestion,)\n }\n\n\nphases.content.register(IssuePhase())\n", "path": "meinberlin/apps/livequestions/phases.py"}]} | 1,569 | 445 |
gh_patches_debug_17824 | rasdani/github-patches | git_diff | hydroshare__hydroshare-5083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Track user agent for metrics
**Describe the feature you'd like and what it will do**
In HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient.
**Why is this feature important?**
We need more insight into how HS' ecosystem of tools is being used. This information should drive our continued development of existing tools and our consideration of additions for future use.
**Is your feature request related to a problem? Please describe.**
It is difficult to make decisions without information.
</issue>
<code>
[start of hs_tracking/utils.py]
1 import robot_detection
2 from ipware.ip import get_ip
3 from hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile
4 from urllib.parse import urlparse
5
6
7 def get_client_ip(request):
8 return get_ip(request)
9
10
11 def get_user_type(session):
12 try:
13 user = session.visitor.user
14 usertype = user.userprofile.user_type
15 except AttributeError:
16 usertype = None
17 return usertype
18
19
20 def get_user_email_domain(session):
21 try:
22 user = session.visitor.user
23 emaildomain = user.email.split('@')[-1]
24 except AttributeError:
25 emaildomain = None
26 return emaildomain
27
28
29 def get_user_email_tld(session, emaildomain=None):
30 try:
31 if not emaildomain:
32 emaildomain = get_user_email_domain(session)
33 if emaildomain:
34 shortdomain = '.'.join(emaildomain.split('.')[1:])
35 return shortdomain
36 except AttributeError:
37 return None
38
39
40 def is_human(user_agent):
41 if robot_detection.is_robot(user_agent):
42 return False
43 return True
44
45
46 def get_std_log_fields(request, session=None):
47 """ returns a standard set of metadata that to each receiver function.
48 This ensures that all activities are reporting a consistent set of metrics
49 """
50 user_type = None
51 user_email_tld = None
52 full_domain = None
53 if session is not None:
54 user_type = get_user_type(session)
55 full_domain = get_user_email_domain(session)
56 user_email_tld = get_user_email_tld(session, full_domain)
57
58 return {
59 'user_ip': get_client_ip(request),
60 'user_type': user_type,
61 'user_email_domain': user_email_tld,
62 'user_email_domain_full': full_domain
63 }
64
65
66 def authentic_redirect_url(url):
67 """ Validates a url scheme and netloc is in an existing web app
68 :param url: String of a url
69 :return: Boolean, True if the url exists in a web app
70 """
71 if not url:
72 return False
73 u = urlparse(url)
74 url_base = "{}://{}".format(u.scheme, u.netloc)
75 return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \
76 or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \
77 or RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()
78
[end of hs_tracking/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py
--- a/hs_tracking/utils.py
+++ b/hs_tracking/utils.py
@@ -47,6 +47,12 @@
""" returns a standard set of metadata that to each receiver function.
This ensures that all activities are reporting a consistent set of metrics
"""
+ try:
+ user_agent = request.META['HTTP_USER_AGENT']
+ human = is_human(user_agent)
+ except KeyError:
+ user_agent = None
+ human = None
user_type = None
user_email_tld = None
full_domain = None
@@ -59,7 +65,9 @@
'user_ip': get_client_ip(request),
'user_type': user_type,
'user_email_domain': user_email_tld,
- 'user_email_domain_full': full_domain
+ 'user_email_domain_full': full_domain,
+ 'is_human': human,
+ 'user_agent': user_agent
}
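The hunk above records the raw `user_agent` alongside an `is_human` flag. To connect this back to the issue's motivation (telling browser traffic apart from tools such as hsclient), a downstream report could bucket the stored strings; the helper and substring markers below are illustrative assumptions rather than HydroShare code:

```python
import robot_detection  # same library the tracking module already uses


def classify_user_agent(user_agent):
    """Bucket a recorded user-agent string for reporting (illustrative only)."""
    if not user_agent:
        return 'unknown'
    if robot_detection.is_robot(user_agent):
        return 'bot'
    ua = user_agent.lower()
    # Substrings are assumptions about what hsclient / python-requests send,
    # not values confirmed by the HydroShare codebase.
    if 'hsclient' in ua or 'python-requests' in ua:
        return 'programmatic'
    return 'browser'
```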
| {"golden_diff": "diff --git a/hs_tracking/utils.py b/hs_tracking/utils.py\n--- a/hs_tracking/utils.py\n+++ b/hs_tracking/utils.py\n@@ -47,6 +47,12 @@\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n+ try:\n+ user_agent = request.META['HTTP_USER_AGENT']\n+ human = is_human(user_agent)\n+ except KeyError:\n+ user_agent = None\n+ human = None\n user_type = None\n user_email_tld = None\n full_domain = None\n@@ -59,7 +65,9 @@\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n- 'user_email_domain_full': full_domain\n+ 'user_email_domain_full': full_domain,\n+ 'is_human': human,\n+ 'user_agent': user_agent\n }\n", "issue": "Track user agent for metrics\n**Describe the feature you'd like and what it will do**\r\nIn HS v2.5.4, we don't track user_agent in our metrics. This makes it difficult to tell when requests to HS are occurring via direct UI interactions, or via other tools like hsclient.\r\n\r\n\r\n**Why is this feature important?**\r\nWe need more insight into how HS' ecosystem of tools are being used. This information should drive our continued development on existing tools and our consideration of additions for future use.\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nIt is difficult to make decisions without information.\r\n\n", "before_files": [{"content": "import robot_detection\nfrom ipware.ip import get_ip\nfrom hs_tools_resource.models import RequestUrlBase, RequestUrlBaseAggregation, RequestUrlBaseFile\nfrom urllib.parse import urlparse\n\n\ndef get_client_ip(request):\n return get_ip(request)\n\n\ndef get_user_type(session):\n try:\n user = session.visitor.user\n usertype = user.userprofile.user_type\n except AttributeError:\n usertype = None\n return usertype\n\n\ndef get_user_email_domain(session):\n try:\n user = session.visitor.user\n emaildomain = user.email.split('@')[-1]\n except AttributeError:\n emaildomain = None\n return emaildomain\n\n\ndef get_user_email_tld(session, emaildomain=None):\n try:\n if not emaildomain:\n emaildomain = get_user_email_domain(session)\n if emaildomain:\n shortdomain = '.'.join(emaildomain.split('.')[1:])\n return shortdomain\n except AttributeError:\n return None\n\n\ndef is_human(user_agent):\n if robot_detection.is_robot(user_agent):\n return False\n return True\n\n\ndef get_std_log_fields(request, session=None):\n \"\"\" returns a standard set of metadata that to each receiver function.\n This ensures that all activities are reporting a consistent set of metrics\n \"\"\"\n user_type = None\n user_email_tld = None\n full_domain = None\n if session is not None:\n user_type = get_user_type(session)\n full_domain = get_user_email_domain(session)\n user_email_tld = get_user_email_tld(session, full_domain)\n\n return {\n 'user_ip': get_client_ip(request),\n 'user_type': user_type,\n 'user_email_domain': user_email_tld,\n 'user_email_domain_full': full_domain\n }\n\n\ndef authentic_redirect_url(url):\n \"\"\" Validates a url scheme and netloc is in an existing web app\n :param url: String of a url\n :return: Boolean, True if the url exists in a web app\n \"\"\"\n if not url:\n return False\n u = urlparse(url)\n url_base = \"{}://{}\".format(u.scheme, u.netloc)\n return RequestUrlBase.objects.filter(value__startswith=url_base).exists() \\\n or RequestUrlBaseAggregation.objects.filter(value__startswith=url_base).exists() \\\n or 
RequestUrlBaseFile.objects.filter(value__startswith=url_base).exists()\n", "path": "hs_tracking/utils.py"}]} | 1,325 | 221 |
gh_patches_debug_19918 | rasdani/github-patches | git_diff | deepchecks__deepchecks-971 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Drift titles are not clearly connected to their plots

This example (taken from https://docs.deepchecks.com/en/latest/examples/vision/guides/quickstart_in_5_minutes.html) shows that it is very hard to tell that the "area" title names the property that has drifted. This happens in the common code for drift plots. We need to find a clearer way to indicate the drifted feature, either:
1. Find a way to center and enlarge the title / add bold / underscore (while still having the "Additional output" title be superior to it)
2. Add the feature name also to the title of the plots themselves.
</issue>
<code>
[start of deepchecks/utils/distribution/drift.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Common utilities for distribution checks."""
12
13 from typing import Tuple, Union, Hashable, Callable
14
15 from scipy.stats import wasserstein_distance
16 import numpy as np
17 import pandas as pd
18
19 import plotly.graph_objects as go
20 from plotly.subplots import make_subplots
21
22 from deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces
23 from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins
24 from deepchecks.core.errors import DeepchecksValueError
25
26
27 PSI_MIN_PERCENTAGE = 0.01
28
29
30 __all__ = ['calc_drift_and_plot']
31
32
33 def psi(expected_percents: np.ndarray, actual_percents: np.ndarray):
34 """
35 Calculate the PSI (Population Stability Index).
36
37 See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf
38
39 Parameters
40 ----------
41 expected_percents: np.ndarray
42 array of percentages of each value in the expected distribution.
43 actual_percents: : np.ndarray
44 array of percentages of each value in the actual distribution.
45 Returns
46 -------
47 psi
48 The PSI score
49
50 """
51 psi_value = 0
52 for i in range(len(expected_percents)):
53 # In order for the value not to diverge, we cap our min percentage value
54 e_perc = max(expected_percents[i], PSI_MIN_PERCENTAGE)
55 a_perc = max(actual_percents[i], PSI_MIN_PERCENTAGE)
56 value = (e_perc - a_perc) * np.log(e_perc / a_perc)
57 psi_value += value
58
59 return psi_value
60
61
62 def earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]):
63 """
64 Calculate the Earth Movers Distance (Wasserstein distance).
65
66 See https://en.wikipedia.org/wiki/Wasserstein_metric
67
68 Function is for numerical data only.
69
70 Parameters
71 ----------
72 dist1 : Union[np.ndarray, pd.Series]
73 array of numberical values.
74 dist2 : Union[np.ndarray, pd.Series]
75 array of numberical values to compare dist1 to.
76 Returns
77 -------
78 Any
79 the Wasserstein distance between the two distributions.
80
81 """
82 unique1 = np.unique(dist1)
83 unique2 = np.unique(dist2)
84
85 sample_space = list(set(unique1).union(set(unique2)))
86
87 val_max = max(sample_space)
88 val_min = min(sample_space)
89
90 if val_max == val_min:
91 return 0
92
93 dist1 = (dist1 - val_min) / (val_max - val_min)
94 dist2 = (dist2 - val_min) / (val_max - val_min)
95
96 return wasserstein_distance(dist1, dist2)
97
98
99 def calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, plot_title: Hashable,
100 column_type: str, max_num_categories: int = 10) -> Tuple[float, str, Callable]:
101 """
102 Calculate drift score per column.
103
104 Parameters
105 ----------
106 train_column : pd.Series
107 column from train dataset
108 test_column : pd.Series
109 same column from test dataset
110 plot_title : Hashable
111 title of plot
112 column_type : str
113 type of column (either "numerical" or "categorical")
114 max_num_categories : int , default: 10
115 Max number of allowed categories. If there are more, they are binned into an "Other" category.
116 Returns
117 -------
118 Tuple[float, str, Callable]
119 drift score of the difference between the two columns' distributions (Earth movers distance for
120 numerical, PSI for categorical)
121 graph comparing the two distributions (density for numerical, stack bar for categorical)
122 """
123 train_dist = train_column.dropna().values.reshape(-1)
124 test_dist = test_column.dropna().values.reshape(-1)
125
126 if column_type == 'numerical':
127 scorer_name = "Earth Mover's Distance"
128
129 train_dist = train_dist.astype('float')
130 test_dist = test_dist.astype('float')
131
132 score = earth_movers_distance(dist1=train_dist, dist2=test_dist)
133
134 bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score)
135 dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist)
136
137 elif column_type == 'categorical':
138 scorer_name = 'PSI'
139 expected_percents, actual_percents, _ = \
140 preprocess_2_cat_cols_to_same_bins(dist1=train_dist, dist2=test_dist, max_num_categories=max_num_categories)
141 score = psi(expected_percents=expected_percents, actual_percents=actual_percents)
142
143 bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1)
144 dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, is_categorical=True,
145 max_num_categories=max_num_categories)
146 else:
147 # Should never reach here
148 raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}')
149
150 fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,
151 row_heights=[0.1, 0.9],
152 subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])
153
154 fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))
155 fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))
156
157 shared_layout = go.Layout(
158 xaxis=bar_x_axis,
159 yaxis=bar_y_axis,
160 xaxis2=dist_x_axis,
161 yaxis2=dist_y_axis,
162 legend=dict(
163 title='Dataset',
164 yanchor='top',
165 y=0.6),
166 width=700,
167 height=400,
168 title=plot_title
169 )
170
171 fig.update_layout(shared_layout)
172
173 return score, scorer_name, fig
174
[end of deepchecks/utils/distribution/drift.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepchecks/utils/distribution/drift.py b/deepchecks/utils/distribution/drift.py
--- a/deepchecks/utils/distribution/drift.py
+++ b/deepchecks/utils/distribution/drift.py
@@ -149,7 +149,7 @@
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,
row_heights=[0.1, 0.9],
- subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])
+ subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])
fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))
fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))
@@ -165,7 +165,7 @@
y=0.6),
width=700,
height=400,
- title=plot_title
+ title=dict(text=plot_title, x=0.5, xanchor='center')
)
fig.update_layout(shared_layout)
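The merged change above follows option 1 from the issue, centering the figure-level title so the drifted property's name stands out. Option 2, repeating the feature name in the subplot titles themselves, would look roughly like the sketch below; the example values are placeholders and this is not the change that was merged:

```python
from plotly.subplots import make_subplots

plot_title = 'area'                      # drifted property (placeholder value)
scorer_name = "Earth Mover's Distance"   # placeholder value

fig = make_subplots(
    rows=2, cols=1, vertical_spacing=0.2,
    shared_yaxes=False, shared_xaxes=False,
    row_heights=[0.1, 0.9],
    subplot_titles=[f'{plot_title} - Drift Score ({scorer_name})',
                    f'{plot_title} - Distribution Plot'],
)
```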
| {"golden_diff": "diff --git a/deepchecks/utils/distribution/drift.py b/deepchecks/utils/distribution/drift.py\n--- a/deepchecks/utils/distribution/drift.py\n+++ b/deepchecks/utils/distribution/drift.py\n@@ -149,7 +149,7 @@\n \n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.1, 0.9],\n- subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])\n+ subplot_titles=[f'Drift Score ({scorer_name})', 'Distribution Plot'])\n \n fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))\n fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))\n@@ -165,7 +165,7 @@\n y=0.6),\n width=700,\n height=400,\n- title=plot_title\n+ title=dict(text=plot_title, x=0.5, xanchor='center')\n )\n \n fig.update_layout(shared_layout)\n", "issue": "[BUG] Drift titles are not clearly connected to their plots\n\r\n\r\nThis example (taken from https://docs.deepchecks.com/en/latest/examples/vision/guides/quickstart_in_5_minutes.html) shows that it is very hard to understand that the \"area\" title is the property that is drifted. This happens in the common code for drift plots. Need to find a clearer way to indicate the drifted feature, either:\r\n1. Find a way to center and enlarge the title / add bold / underscore (while still having the \"Additional output\" title be superior to it)\r\n2. Add the feature name also to the title of the plots themselves. \n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Common utilities for distribution checks.\"\"\"\n\nfrom typing import Tuple, Union, Hashable, Callable\n\nfrom scipy.stats import wasserstein_distance\nimport numpy as np\nimport pandas as pd\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom deepchecks.utils.distribution.plot import drift_score_bar_traces, feature_distribution_traces\nfrom deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\nfrom deepchecks.core.errors import DeepchecksValueError\n\n\nPSI_MIN_PERCENTAGE = 0.01\n\n\n__all__ = ['calc_drift_and_plot']\n\n\ndef psi(expected_percents: np.ndarray, actual_percents: np.ndarray):\n \"\"\"\n Calculate the PSI (Population Stability Index).\n\n See https://www.lexjansen.com/wuss/2017/47_Final_Paper_PDF.pdf\n\n Parameters\n ----------\n expected_percents: np.ndarray\n array of percentages of each value in the expected distribution.\n actual_percents: : np.ndarray\n array of percentages of each value in the actual distribution.\n Returns\n -------\n psi\n The PSI score\n\n \"\"\"\n psi_value = 0\n for i in range(len(expected_percents)):\n # In order for the value not to diverge, we cap our min percentage value\n e_perc = max(expected_percents[i], PSI_MIN_PERCENTAGE)\n a_perc = max(actual_percents[i], PSI_MIN_PERCENTAGE)\n value = (e_perc - a_perc) * np.log(e_perc / a_perc)\n psi_value += value\n\n return psi_value\n\n\ndef earth_movers_distance(dist1: Union[np.ndarray, pd.Series], dist2: Union[np.ndarray, pd.Series]):\n \"\"\"\n Calculate the Earth Movers Distance (Wasserstein distance).\n\n See https://en.wikipedia.org/wiki/Wasserstein_metric\n\n Function is for numerical data only.\n\n Parameters\n ----------\n dist1 : Union[np.ndarray, pd.Series]\n array of numberical values.\n dist2 : Union[np.ndarray, pd.Series]\n array of numberical values to compare dist1 to.\n Returns\n -------\n Any\n the Wasserstein distance between the two distributions.\n\n \"\"\"\n unique1 = np.unique(dist1)\n unique2 = np.unique(dist2)\n\n sample_space = list(set(unique1).union(set(unique2)))\n\n val_max = max(sample_space)\n val_min = min(sample_space)\n\n if val_max == val_min:\n return 0\n\n dist1 = (dist1 - val_min) / (val_max - val_min)\n dist2 = (dist2 - val_min) / (val_max - val_min)\n\n return wasserstein_distance(dist1, dist2)\n\n\ndef calc_drift_and_plot(train_column: pd.Series, test_column: pd.Series, plot_title: Hashable,\n column_type: str, max_num_categories: int = 10) -> Tuple[float, str, Callable]:\n \"\"\"\n Calculate drift score per column.\n\n Parameters\n ----------\n train_column : pd.Series\n column from train dataset\n test_column : pd.Series\n same column from test dataset\n plot_title : Hashable\n title of plot\n column_type : str\n type of column (either \"numerical\" or \"categorical\")\n max_num_categories : int , default: 10\n Max number of allowed categories. 
If there are more, they are binned into an \"Other\" category.\n Returns\n -------\n Tuple[float, str, Callable]\n drift score of the difference between the two columns' distributions (Earth movers distance for\n numerical, PSI for categorical)\n graph comparing the two distributions (density for numerical, stack bar for categorical)\n \"\"\"\n train_dist = train_column.dropna().values.reshape(-1)\n test_dist = test_column.dropna().values.reshape(-1)\n\n if column_type == 'numerical':\n scorer_name = \"Earth Mover's Distance\"\n\n train_dist = train_dist.astype('float')\n test_dist = test_dist.astype('float')\n\n score = earth_movers_distance(dist1=train_dist, dist2=test_dist)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist)\n\n elif column_type == 'categorical':\n scorer_name = 'PSI'\n expected_percents, actual_percents, _ = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_dist, dist2=test_dist, max_num_categories=max_num_categories)\n score = psi(expected_percents=expected_percents, actual_percents=actual_percents)\n\n bar_traces, bar_x_axis, bar_y_axis = drift_score_bar_traces(score, bar_max=1)\n dist_traces, dist_x_axis, dist_y_axis = feature_distribution_traces(train_dist, test_dist, is_categorical=True,\n max_num_categories=max_num_categories)\n else:\n # Should never reach here\n raise DeepchecksValueError(f'Unsupported column type for drift: {column_type}')\n\n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.2, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.1, 0.9],\n subplot_titles=['Drift Score - ' + scorer_name, 'Distribution Plot'])\n\n fig.add_traces(bar_traces, rows=[1] * len(bar_traces), cols=[1] * len(bar_traces))\n fig.add_traces(dist_traces, rows=[2] * len(dist_traces), cols=[1] * len(dist_traces))\n\n shared_layout = go.Layout(\n xaxis=bar_x_axis,\n yaxis=bar_y_axis,\n xaxis2=dist_x_axis,\n yaxis2=dist_y_axis,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.6),\n width=700,\n height=400,\n title=plot_title\n )\n\n fig.update_layout(shared_layout)\n\n return score, scorer_name, fig\n", "path": "deepchecks/utils/distribution/drift.py"}]} | 2,586 | 276 |
gh_patches_debug_28599 | rasdani/github-patches | git_diff | google__mobly-538 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors in `teardown_class` are not properly recorded
If Mobly encounters an error in the cleanup stage of `teardown_class`, the error would not be recorded anywhere except the cli output, which makes debugging difficult.
This points to a bigger problem: errors that occur in the base test class and the runner themselves are not clearly handled.
</issue>
<code>
[start of mobly/controller_manager.py]
1 # Copyright 2018 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """ Module for Mobly controller management."""
15 import collections
16 import copy
17 import logging
18 import yaml
19
20 from mobly import records
21 from mobly import signals
22
23
24 def verify_controller_module(module):
25 """Verifies a module object follows the required interface for
26 controllers.
27
28 The interface is explained in the docstring of
29 `base_test.BaseTestClass.register_controller`.
30
31 Args:
32 module: An object that is a controller module. This is usually
33 imported with import statements or loaded by importlib.
34
35 Raises:
36 ControllerError: if the module does not match the Mobly controller
37 interface, or one of the required members is null.
38 """
39 required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')
40 for attr in required_attributes:
41 if not hasattr(module, attr):
42 raise signals.ControllerError(
43 'Module %s missing required controller module attribute'
44 ' %s.' % (module.__name__, attr))
45 if not getattr(module, attr):
46 raise signals.ControllerError(
47 'Controller interface %s in %s cannot be null.' %
48 (attr, module.__name__))
49
50
51 class ControllerManager(object):
52 """Manages the controller objects for Mobly.
53
54 This manages the life cycles and info retrieval of all controller objects
55 used in a test.
56
57 Attributes:
58 controller_configs: dict, controller configs provided by the user via
59 test bed config.
60 """
61
62 def __init__(self, class_name, controller_configs):
63 # Controller object management.
64 self._controller_objects = collections.OrderedDict(
65 ) # controller_name: objects
66 self._controller_modules = {} # controller_name: module
67 self._class_name = class_name
68 self.controller_configs = controller_configs
69
70 def register_controller(self, module, required=True, min_number=1):
71 """Loads a controller module and returns its loaded devices.
72
73 This is to be used in a mobly test class.
74
75 Args:
76 module: A module that follows the controller module interface.
77 required: A bool. If True, failing to register the specified
78 controller module raises exceptions. If False, the objects
79 failed to instantiate will be skipped.
80 min_number: An integer that is the minimum number of controller
81 objects to be created. Default is one, since you should not
82 register a controller module without expecting at least one
83 object.
84
85 Returns:
86 A list of controller objects instantiated from controller_module, or
87 None if no config existed for this controller and it was not a
88 required controller.
89
90 Raises:
91 ControllerError:
92 * The controller module has already been registered.
93 * The actual number of objects instantiated is less than the
94 * `min_number`.
95 * `required` is True and no corresponding config can be found.
96 * Any other error occurred in the registration process.
97 """
98 verify_controller_module(module)
99 # Use the module's name as the ref name
100 module_ref_name = module.__name__.split('.')[-1]
101 if module_ref_name in self._controller_objects:
102 raise signals.ControllerError(
103 'Controller module %s has already been registered. It cannot '
104 'be registered again.' % module_ref_name)
105 # Create controller objects.
106 module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME
107 if module_config_name not in self.controller_configs:
108 if required:
109 raise signals.ControllerError(
110 'No corresponding config found for %s' %
111 module_config_name)
112 logging.warning(
113 'No corresponding config found for optional controller %s',
114 module_config_name)
115 return None
116 try:
117 # Make a deep copy of the config to pass to the controller module,
118 # in case the controller module modifies the config internally.
119 original_config = self.controller_configs[module_config_name]
120 controller_config = copy.deepcopy(original_config)
121 objects = module.create(controller_config)
122 except:
123 logging.exception(
124 'Failed to initialize objects for controller %s, abort!',
125 module_config_name)
126 raise
127 if not isinstance(objects, list):
128 raise signals.ControllerError(
129 'Controller module %s did not return a list of objects, abort.'
130 % module_ref_name)
131 # Check we got enough controller objects to continue.
132 actual_number = len(objects)
133 if actual_number < min_number:
134 module.destroy(objects)
135 raise signals.ControllerError(
136 'Expected to get at least %d controller objects, got %d.' %
137 (min_number, actual_number))
138 # Save a shallow copy of the list for internal usage, so tests can't
139 # affect internal registry by manipulating the object list.
140 self._controller_objects[module_ref_name] = copy.copy(objects)
141 logging.debug('Found %d objects for controller %s', len(objects),
142 module_config_name)
143 self._controller_modules[module_ref_name] = module
144 return objects
145
146 def unregister_controllers(self):
147 """Destroy controller objects and clear internal registry.
148
149 This will be called after each test class.
150 """
151 # TODO(xpconanfan): actually record these errors instead of just
152 # logging them.
153 for name, module in self._controller_modules.items():
154 logging.debug('Destroying %s.', name)
155 try:
156 module.destroy(self._controller_objects[name])
157 except:
158 logging.exception('Exception occurred destroying %s.', name)
159 self._controller_objects = collections.OrderedDict()
160 self._controller_modules = {}
161
162 def _create_controller_info_record(self, controller_module_name):
163 """Creates controller info record for a particular controller type.
164
165 Info is retrieved from all the controller objects spawned from the
166 specified module, using the controller module's `get_info` function.
167
168 Args:
169 controller_module_name: string, the name of the controller module
170 to retrieve info from.
171
172 Returns:
173 A records.ControllerInfoRecord object.
174 """
175 module = self._controller_modules[controller_module_name]
176 controller_info = None
177 try:
178 controller_info = module.get_info(
179 copy.copy(self._controller_objects[controller_module_name]))
180 except AttributeError:
181 logging.warning('No optional debug info found for controller '
182 '%s. To provide it, implement `get_info`.',
183 controller_module_name)
184 try:
185 yaml.dump(controller_info)
186 except TypeError:
187 logging.warning('The info of controller %s in class "%s" is not '
188 'YAML serializable! Coercing it to string.',
189 controller_module_name, self._class_name)
190 controller_info = str(controller_info)
191 return records.ControllerInfoRecord(
192 self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,
193 controller_info)
194
195 def get_controller_info_records(self):
196 """Get the info records for all the controller objects in the manager.
197
198 New info records for each controller object are created for every call
199 so the latest info is included.
200
201 Returns:
202 List of records.ControllerInfoRecord objects. Each opject conatins
203 the info of a type of controller
204 """
205 info_records = []
206 for controller_module_name in self._controller_objects.keys():
207 record = self._create_controller_info_record(
208 controller_module_name)
209 if record:
210 info_records.append(record)
211 return info_records
212
[end of mobly/controller_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py
--- a/mobly/controller_manager.py
+++ b/mobly/controller_manager.py
@@ -17,6 +17,7 @@
import logging
import yaml
+from mobly import expects
from mobly import records
from mobly import signals
@@ -152,10 +153,9 @@
# logging them.
for name, module in self._controller_modules.items():
logging.debug('Destroying %s.', name)
- try:
+ with expects.expect_no_raises(
+ 'Exception occurred destroying %s.' % name):
module.destroy(self._controller_objects[name])
- except:
- logging.exception('Exception occurred destroying %s.', name)
self._controller_objects = collections.OrderedDict()
self._controller_modules = {}
@@ -204,8 +204,11 @@
"""
info_records = []
for controller_module_name in self._controller_objects.keys():
- record = self._create_controller_info_record(
- controller_module_name)
- if record:
- info_records.append(record)
+ with expects.expect_no_raises(
+ 'Failed to collect controller info from %s' %
+ controller_module_name):
+ record = self._create_controller_info_record(
+ controller_module_name)
+ if record:
+ info_records.append(record)
return info_records
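The core pattern in the diff above is wrapping each cleanup step in `expects.expect_no_raises`, which records the exception on the test's result instead of letting it surface only in console output. A minimal standalone sketch of that pattern is below; the `destroy_all` helper is hypothetical and not part of Mobly:

```python
from mobly import expects


def destroy_all(controller_modules, controller_objects):
    """Tear down every registered controller, recording failures instead of raising."""
    for name, module in controller_modules.items():
        # An exception raised inside this context is captured on the current
        # test record rather than only being printed to the CLI.
        with expects.expect_no_raises('Exception occurred destroying %s.' % name):
            module.destroy(controller_objects[name])
```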
| {"golden_diff": "diff --git a/mobly/controller_manager.py b/mobly/controller_manager.py\n--- a/mobly/controller_manager.py\n+++ b/mobly/controller_manager.py\n@@ -17,6 +17,7 @@\n import logging\n import yaml\n \n+from mobly import expects\n from mobly import records\n from mobly import signals\n \n@@ -152,10 +153,9 @@\n # logging them.\n for name, module in self._controller_modules.items():\n logging.debug('Destroying %s.', name)\n- try:\n+ with expects.expect_no_raises(\n+ 'Exception occurred destroying %s.' % name):\n module.destroy(self._controller_objects[name])\n- except:\n- logging.exception('Exception occurred destroying %s.', name)\n self._controller_objects = collections.OrderedDict()\n self._controller_modules = {}\n \n@@ -204,8 +204,11 @@\n \"\"\"\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n- record = self._create_controller_info_record(\n- controller_module_name)\n- if record:\n- info_records.append(record)\n+ with expects.expect_no_raises(\n+ 'Failed to collect controller info from %s' %\n+ controller_module_name):\n+ record = self._create_controller_info_record(\n+ controller_module_name)\n+ if record:\n+ info_records.append(record)\n return info_records\n", "issue": "Errors in `teardown_class` are not properly recorded\nIf Mobly encounters an error in the cleanup stage of `teardown_class`, the error would not be recorded anywhere except the cli output, which makes debugging difficult.\r\n\r\nThis points to a bigger problem: the errors occurred in base test and runner themselves are not clearly handled.\n", "before_files": [{"content": "# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Module for Mobly controller management.\"\"\"\nimport collections\nimport copy\nimport logging\nimport yaml\n\nfrom mobly import records\nfrom mobly import signals\n\n\ndef verify_controller_module(module):\n \"\"\"Verifies a module object follows the required interface for\n controllers.\n\n The interface is explained in the docstring of\n `base_test.BaseTestClass.register_controller`.\n\n Args:\n module: An object that is a controller module. This is usually\n imported with import statements or loaded by importlib.\n\n Raises:\n ControllerError: if the module does not match the Mobly controller\n interface, or one of the required members is null.\n \"\"\"\n required_attributes = ('create', 'destroy', 'MOBLY_CONTROLLER_CONFIG_NAME')\n for attr in required_attributes:\n if not hasattr(module, attr):\n raise signals.ControllerError(\n 'Module %s missing required controller module attribute'\n ' %s.' % (module.__name__, attr))\n if not getattr(module, attr):\n raise signals.ControllerError(\n 'Controller interface %s in %s cannot be null.' 
%\n (attr, module.__name__))\n\n\nclass ControllerManager(object):\n \"\"\"Manages the controller objects for Mobly.\n\n This manages the life cycles and info retrieval of all controller objects\n used in a test.\n\n Attributes:\n controller_configs: dict, controller configs provided by the user via\n test bed config.\n \"\"\"\n\n def __init__(self, class_name, controller_configs):\n # Controller object management.\n self._controller_objects = collections.OrderedDict(\n ) # controller_name: objects\n self._controller_modules = {} # controller_name: module\n self._class_name = class_name\n self.controller_configs = controller_configs\n\n def register_controller(self, module, required=True, min_number=1):\n \"\"\"Loads a controller module and returns its loaded devices.\n\n This is to be used in a mobly test class.\n\n Args:\n module: A module that follows the controller module interface.\n required: A bool. If True, failing to register the specified\n controller module raises exceptions. If False, the objects\n failed to instantiate will be skipped.\n min_number: An integer that is the minimum number of controller\n objects to be created. Default is one, since you should not\n register a controller module without expecting at least one\n object.\n\n Returns:\n A list of controller objects instantiated from controller_module, or\n None if no config existed for this controller and it was not a\n required controller.\n\n Raises:\n ControllerError:\n * The controller module has already been registered.\n * The actual number of objects instantiated is less than the\n * `min_number`.\n * `required` is True and no corresponding config can be found.\n * Any other error occurred in the registration process.\n \"\"\"\n verify_controller_module(module)\n # Use the module's name as the ref name\n module_ref_name = module.__name__.split('.')[-1]\n if module_ref_name in self._controller_objects:\n raise signals.ControllerError(\n 'Controller module %s has already been registered. It cannot '\n 'be registered again.' % module_ref_name)\n # Create controller objects.\n module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME\n if module_config_name not in self.controller_configs:\n if required:\n raise signals.ControllerError(\n 'No corresponding config found for %s' %\n module_config_name)\n logging.warning(\n 'No corresponding config found for optional controller %s',\n module_config_name)\n return None\n try:\n # Make a deep copy of the config to pass to the controller module,\n # in case the controller module modifies the config internally.\n original_config = self.controller_configs[module_config_name]\n controller_config = copy.deepcopy(original_config)\n objects = module.create(controller_config)\n except:\n logging.exception(\n 'Failed to initialize objects for controller %s, abort!',\n module_config_name)\n raise\n if not isinstance(objects, list):\n raise signals.ControllerError(\n 'Controller module %s did not return a list of objects, abort.'\n % module_ref_name)\n # Check we got enough controller objects to continue.\n actual_number = len(objects)\n if actual_number < min_number:\n module.destroy(objects)\n raise signals.ControllerError(\n 'Expected to get at least %d controller objects, got %d.' 
%\n (min_number, actual_number))\n # Save a shallow copy of the list for internal usage, so tests can't\n # affect internal registry by manipulating the object list.\n self._controller_objects[module_ref_name] = copy.copy(objects)\n logging.debug('Found %d objects for controller %s', len(objects),\n module_config_name)\n self._controller_modules[module_ref_name] = module\n return objects\n\n def unregister_controllers(self):\n \"\"\"Destroy controller objects and clear internal registry.\n\n This will be called after each test class.\n \"\"\"\n # TODO(xpconanfan): actually record these errors instead of just\n # logging them.\n for name, module in self._controller_modules.items():\n logging.debug('Destroying %s.', name)\n try:\n module.destroy(self._controller_objects[name])\n except:\n logging.exception('Exception occurred destroying %s.', name)\n self._controller_objects = collections.OrderedDict()\n self._controller_modules = {}\n\n def _create_controller_info_record(self, controller_module_name):\n \"\"\"Creates controller info record for a particular controller type.\n\n Info is retrieved from all the controller objects spawned from the\n specified module, using the controller module's `get_info` function.\n\n Args:\n controller_module_name: string, the name of the controller module\n to retrieve info from.\n\n Returns:\n A records.ControllerInfoRecord object.\n \"\"\"\n module = self._controller_modules[controller_module_name]\n controller_info = None\n try:\n controller_info = module.get_info(\n copy.copy(self._controller_objects[controller_module_name]))\n except AttributeError:\n logging.warning('No optional debug info found for controller '\n '%s. To provide it, implement `get_info`.',\n controller_module_name)\n try:\n yaml.dump(controller_info)\n except TypeError:\n logging.warning('The info of controller %s in class \"%s\" is not '\n 'YAML serializable! Coercing it to string.',\n controller_module_name, self._class_name)\n controller_info = str(controller_info)\n return records.ControllerInfoRecord(\n self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,\n controller_info)\n\n def get_controller_info_records(self):\n \"\"\"Get the info records for all the controller objects in the manager.\n\n New info records for each controller object are created for every call\n so the latest info is included.\n\n Returns:\n List of records.ControllerInfoRecord objects. Each opject conatins\n the info of a type of controller\n \"\"\"\n info_records = []\n for controller_module_name in self._controller_objects.keys():\n record = self._create_controller_info_record(\n controller_module_name)\n if record:\n info_records.append(record)\n return info_records\n", "path": "mobly/controller_manager.py"}]} | 2,798 | 309 |
gh_patches_debug_12997 | rasdani/github-patches | git_diff | conan-io__conan-center-index-18559 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] util-linux-libuuid uses wrong cmake target
### Description
In the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`:
https://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113
This was based on the internal practice that Kitware has for their internal libuuid cmake module; however, this is not public, and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. This change should be reverted so that these packages can utilise util-linux-libuuid without needing to be patched.
### Package and Environment Details
N/A
### Conan profile
N/A
### Steps to reproduce
N/A
### Logs
<details><summary>Click to expand log</summary>
```
Put your log output here
```
</details>
</issue>
<code>
[start of recipes/util-linux-libuuid/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.apple import fix_apple_shared_install_name
4 from conan.tools.files import copy, get, rm, rmdir
5 from conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps
6 from conan.tools.layout import basic_layout
7 from conan.tools.scm import Version
8 import os
9
10 required_conan_version = ">=1.53.0"
11
12
13 class UtilLinuxLibuuidConan(ConanFile):
14 name = "util-linux-libuuid"
15 description = "Universally unique id library"
16 url = "https://github.com/conan-io/conan-center-index"
17 homepage = "https://github.com/util-linux/util-linux.git"
18 license = "BSD-3-Clause"
19 topics = "id", "identifier", "unique", "uuid"
20 package_type = "library"
21 provides = "libuuid"
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 @property
33 def _has_sys_file_header(self):
34 return self.settings.os in ["FreeBSD", "Linux", "Macos"]
35
36 def config_options(self):
37 if self.settings.os == "Windows":
38 del self.options.fPIC
39
40 def configure(self):
41 if self.options.shared:
42 self.options.rm_safe("fPIC")
43 self.settings.rm_safe("compiler.cppstd")
44 self.settings.rm_safe("compiler.libcxx")
45
46 def layout(self):
47 basic_layout(self, src_folder="src")
48
49 def _minimum_compiler_version(self, compiler, build_type):
50 min_version = {
51 "gcc": {
52 "Release": "4",
53 "Debug": "8",
54 },
55 "clang": {
56 "Release": "3",
57 "Debug": "3",
58 },
59 "apple-clang": {
60 "Release": "5",
61 "Debug": "5",
62 },
63 }
64 return min_version.get(str(compiler), {}).get(str(build_type), "0")
65
66 def validate(self):
67 min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)
68 if Version(self.settings.compiler.version) < min_version:
69 raise ConanInvalidConfiguration(f"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}")
70 if self.settings.os == "Windows":
71 raise ConanInvalidConfiguration(f"{self.ref} is not supported on Windows")
72
73 def requirements(self):
74 if self.settings.os == "Macos":
75 # Required because libintl.{a,dylib} is not distributed via libc on Macos
76 self.requires("libgettext/0.21")
77
78 def source(self):
79 get(self, **self.conan_data["sources"][self.version], strip_root=True)
80
81 def generate(self):
82 tc = AutotoolsToolchain(self)
83 tc.configure_args.append("--disable-all-programs")
84 tc.configure_args.append("--enable-libuuid")
85 if self._has_sys_file_header:
86 tc.extra_defines.append("HAVE_SYS_FILE_H")
87 if "x86" in self.settings.arch:
88 tc.extra_cflags.append("-mstackrealign")
89 tc.generate()
90
91 deps = AutotoolsDeps(self)
92 deps.generate()
93
94 def build(self):
95 autotools = Autotools(self)
96 autotools.configure()
97 autotools.make()
98
99 def package(self):
100 copy(self, "COPYING.BSD-3-Clause", src=os.path.join(self.source_folder, "Documentation", "licenses"), dst=os.path.join(self.package_folder, "licenses"))
101 autotools = Autotools(self)
102 autotools.install()
103 rm(self, "*.la", os.path.join(self.package_folder, "lib"))
104 rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
105 rmdir(self, os.path.join(self.package_folder, "bin"))
106 rmdir(self, os.path.join(self.package_folder, "sbin"))
107 rmdir(self, os.path.join(self.package_folder, "share"))
108 fix_apple_shared_install_name(self)
109
110 def package_info(self):
111 self.cpp_info.set_property("pkg_config_name", "uuid")
112 self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID")
113 self.cpp_info.set_property("cmake_file_name", "LibUUID")
114 self.cpp_info.libs = ["uuid"]
115 self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
116
[end of recipes/util-linux-libuuid/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py
--- a/recipes/util-linux-libuuid/all/conanfile.py
+++ b/recipes/util-linux-libuuid/all/conanfile.py
@@ -109,7 +109,10 @@
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "uuid")
- self.cpp_info.set_property("cmake_target_name", "LibUUID::LibUUID")
- self.cpp_info.set_property("cmake_file_name", "LibUUID")
+ self.cpp_info.set_property("cmake_target_name", "libuuid::libuuid")
+ self.cpp_info.set_property("cmake_file_name", "libuuid")
+ # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe
+ self.cpp_info.set_property("cmake_target_aliases", ["LibUUID::LibUUID"])
+
self.cpp_info.libs = ["uuid"]
self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
| {"golden_diff": "diff --git a/recipes/util-linux-libuuid/all/conanfile.py b/recipes/util-linux-libuuid/all/conanfile.py\n--- a/recipes/util-linux-libuuid/all/conanfile.py\n+++ b/recipes/util-linux-libuuid/all/conanfile.py\n@@ -109,7 +109,10 @@\n \n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n- self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n- self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n+ self.cpp_info.set_property(\"cmake_target_name\", \"libuuid::libuuid\")\n+ self.cpp_info.set_property(\"cmake_file_name\", \"libuuid\")\n+ # Maintain alias to `LibUUID::LibUUID` for previous version of the recipe\n+ self.cpp_info.set_property(\"cmake_target_aliases\", [\"LibUUID::LibUUID\"])\n+\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "issue": "[package] util-linux-libuuid uses wrong cmake target\n### Description\n\nIn the following lines, the `util-linux-libuuid` recipe sets the cmake target to be `LibUUID::LibUUID` with a filename of `LibUUID-config.cmake`:\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/61c4f7819e6cd3594a57f6c3847f94ab86de623f/recipes/util-linux-libuuid/all/conanfile.py#L112-L113\r\n\r\nThis was based on the internal practice that Kitware has for their internal libuuid cmake module, however this is not public and a number of packages (czmq, cppcommon) seem to assume a `libuuid::libuuid` target. These change should be reverted such that these packages can utilise util-linux-libuuid without a requirement to be patched.\n\n### Package and Environment Details\n\nN/A\n\n### Conan profile\n\nN/A\r\n\n\n### Steps to reproduce\n\nN/A\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nPut your log output here\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.apple import fix_apple_shared_install_name\nfrom conan.tools.files import copy, get, rm, rmdir\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass UtilLinuxLibuuidConan(ConanFile):\n name = \"util-linux-libuuid\"\n description = \"Universally unique id library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/util-linux/util-linux.git\"\n license = \"BSD-3-Clause\"\n topics = \"id\", \"identifier\", \"unique\", \"uuid\"\n package_type = \"library\"\n provides = \"libuuid\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _has_sys_file_header(self):\n return self.settings.os in [\"FreeBSD\", \"Linux\", \"Macos\"]\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def _minimum_compiler_version(self, compiler, build_type):\n min_version = {\n \"gcc\": {\n \"Release\": \"4\",\n \"Debug\": \"8\",\n },\n \"clang\": {\n \"Release\": \"3\",\n \"Debug\": \"3\",\n },\n \"apple-clang\": {\n \"Release\": 
\"5\",\n \"Debug\": \"5\",\n },\n }\n return min_version.get(str(compiler), {}).get(str(build_type), \"0\")\n\n def validate(self):\n min_version = self._minimum_compiler_version(self.settings.compiler, self.settings.build_type)\n if Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(f\"{self.settings.compiler} {self.settings.compiler.version} does not meet the minimum version requirement of version {min_version}\")\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(f\"{self.ref} is not supported on Windows\")\n\n def requirements(self):\n if self.settings.os == \"Macos\":\n # Required because libintl.{a,dylib} is not distributed via libc on Macos\n self.requires(\"libgettext/0.21\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.configure_args.append(\"--disable-all-programs\")\n tc.configure_args.append(\"--enable-libuuid\")\n if self._has_sys_file_header:\n tc.extra_defines.append(\"HAVE_SYS_FILE_H\")\n if \"x86\" in self.settings.arch:\n tc.extra_cflags.append(\"-mstackrealign\")\n tc.generate()\n\n deps = AutotoolsDeps(self)\n deps.generate()\n\n def build(self):\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING.BSD-3-Clause\", src=os.path.join(self.source_folder, \"Documentation\", \"licenses\"), dst=os.path.join(self.package_folder, \"licenses\"))\n autotools = Autotools(self)\n autotools.install()\n rm(self, \"*.la\", os.path.join(self.package_folder, \"lib\"))\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n rmdir(self, os.path.join(self.package_folder, \"bin\"))\n rmdir(self, os.path.join(self.package_folder, \"sbin\"))\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n fix_apple_shared_install_name(self)\n\n def package_info(self):\n self.cpp_info.set_property(\"pkg_config_name\", \"uuid\")\n self.cpp_info.set_property(\"cmake_target_name\", \"LibUUID::LibUUID\")\n self.cpp_info.set_property(\"cmake_file_name\", \"LibUUID\")\n self.cpp_info.libs = [\"uuid\"]\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "path": "recipes/util-linux-libuuid/all/conanfile.py"}]} | 2,040 | 233 |
gh_patches_debug_959 | rasdani/github-patches | git_diff | getsentry__sentry-52329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix(django): Disable admin on prod
Reported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk
</issue>
<code>
[start of src/sentry/conf/urls.py]
1 from __future__ import annotations
2
3 from django.conf import settings
4 from django.urls import URLPattern, URLResolver, re_path
5
6 from sentry.web.frontend import csrf_failure
7 from sentry.web.frontend.error_404 import Error404View
8 from sentry.web.frontend.error_500 import Error500View
9 from sentry.web.urls import urlpatterns as web_urlpatterns
10
11 handler404 = Error404View.as_view()
12 handler500 = Error500View.as_view()
13
14 urlpatterns: list[URLResolver | URLPattern] = [
15 re_path(
16 r"^500/",
17 handler500,
18 name="error-500",
19 ),
20 re_path(
21 r"^404/",
22 handler404,
23 name="error-404",
24 ),
25 re_path(
26 r"^403-csrf-failure/",
27 csrf_failure.view,
28 name="error-403-csrf-failure",
29 ),
30 ]
31
32 if "django.contrib.admin" in settings.INSTALLED_APPS:
33 from sentry import django_admin
34
35 urlpatterns += django_admin.urlpatterns
36
37 urlpatterns += web_urlpatterns
38
[end of src/sentry/conf/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py
--- a/src/sentry/conf/urls.py
+++ b/src/sentry/conf/urls.py
@@ -29,7 +29,7 @@
),
]
-if "django.contrib.admin" in settings.INSTALLED_APPS:
+if "django.contrib.admin" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:
from sentry import django_admin
urlpatterns += django_admin.urlpatterns
| {"golden_diff": "diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py\n--- a/src/sentry/conf/urls.py\n+++ b/src/sentry/conf/urls.py\n@@ -29,7 +29,7 @@\n ),\n ]\n \n-if \"django.contrib.admin\" in settings.INSTALLED_APPS:\n+if \"django.contrib.admin\" in settings.INSTALLED_APPS and settings.ADMIN_ENABLED:\n from sentry import django_admin\n \n urlpatterns += django_admin.urlpatterns\n", "issue": "fix(django): Disable admin on prod\nReported here: https://forum.sentry.io/t/sentry-django-admin-portal/12787?u=byk\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom django.conf import settings\nfrom django.urls import URLPattern, URLResolver, re_path\n\nfrom sentry.web.frontend import csrf_failure\nfrom sentry.web.frontend.error_404 import Error404View\nfrom sentry.web.frontend.error_500 import Error500View\nfrom sentry.web.urls import urlpatterns as web_urlpatterns\n\nhandler404 = Error404View.as_view()\nhandler500 = Error500View.as_view()\n\nurlpatterns: list[URLResolver | URLPattern] = [\n re_path(\n r\"^500/\",\n handler500,\n name=\"error-500\",\n ),\n re_path(\n r\"^404/\",\n handler404,\n name=\"error-404\",\n ),\n re_path(\n r\"^403-csrf-failure/\",\n csrf_failure.view,\n name=\"error-403-csrf-failure\",\n ),\n]\n\nif \"django.contrib.admin\" in settings.INSTALLED_APPS:\n from sentry import django_admin\n\n urlpatterns += django_admin.urlpatterns\n\nurlpatterns += web_urlpatterns\n", "path": "src/sentry/conf/urls.py"}]} | 903 | 103 |
gh_patches_debug_26704 | rasdani/github-patches | git_diff | ManimCommunity__manim-1209 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ManimDirective needs a `ref_methods`
Some of the current documentation uses `:ref_functions:` in the example documentation, which is supposed to link to the proper function; however, it is often the case that the function is actually a method. In these instances, a `ref_methods` option would be appropriate to have, and its usage in the examples should be updated accordingly.
See https://github.com/ManimCommunity/manim/blob/master/docs/source/manim_directive.py#L133-L135
</issue>
<code>
[start of docs/source/manim_directive.py]
1 r"""
2 A directive for including Manim videos in a Sphinx document
3 ===========================================================
4
5 When rendering the HTML documentation, the ``.. manim::`` directive
6 implemented here allows to include rendered videos.
7
8 Its basic usage that allows processing **inline content**
9 looks as follows::
10
11 .. manim:: MyScene
12
13 class MyScene(Scene):
14 def construct(self):
15 ...
16
17 It is required to pass the name of the class representing the
18 scene to be rendered to the directive.
19
20 As a second application, the directive can also be used to
21 render scenes that are defined within doctests, for example::
22
23 .. manim:: DirectiveDoctestExample
24 :ref_classes: Dot
25
26 >>> dot = Dot(color=RED)
27 >>> dot.color
28 <Color #fc6255>
29 >>> class DirectiveDoctestExample(Scene):
30 ... def construct(self):
31 ... self.play(Create(dot))
32
33
34 Options
35 -------
36
37 Options can be passed as follows::
38
39 .. manim:: <Class name>
40 :<option name>: <value>
41
42 The following configuration options are supported by the
43 directive:
44
45 hide_source
46 If this flag is present without argument,
47 the source code is not displayed above the rendered video.
48
49 quality : {'low', 'medium', 'high', 'fourk'}
50 Controls render quality of the video, in analogy to
51 the corresponding command line flags.
52
53 save_as_gif
54 If this flag is present without argument,
55 the scene is rendered as a gif.
56
57 save_last_frame
58 If this flag is present without argument,
59 an image representing the last frame of the scene will
60 be rendered and displayed, instead of a video.
61
62 ref_classes
63 A list of classes, separated by spaces, that is
64 rendered in a reference block after the source code.
65
66 ref_functions
67 A list of functions and methods, separated by spaces,
68 that is rendered in a reference block after the source code.
69
70 """
71 from docutils import nodes
72 from docutils.parsers.rst import directives, Directive
73 from docutils.statemachine import StringList
74
75 import jinja2
76 import os
77 from os.path import relpath
78 from pathlib import Path
79 from typing import List
80
81 import shutil
82
83 from manim import QUALITIES
84
85 classnamedict = {}
86
87
88 class skip_manim_node(nodes.Admonition, nodes.Element):
89 pass
90
91
92 def visit(self, node, name=""):
93 self.visit_admonition(node, name)
94
95
96 def depart(self, node):
97 self.depart_admonition(node)
98
99
100 def process_name_list(option_input: str, reference_type: str) -> List[str]:
101 r"""Reformats a string of space separated class names
102 as a list of strings containing valid Sphinx references.
103
104 Tests
105 -----
106
107 ::
108
109 >>> process_name_list("Tex TexTemplate", "class")
110 [":class:`~.Tex`", ":class:`~.TexTemplate`"]
111 >>> process_name_list("Scene.play Mobject.rotate", "func")
112 [":func:`~.Scene.play`", ":func:`~.Mobject.rotate`"]
113 """
114 return [f":{reference_type}:`~.{name}`" for name in option_input.split()]
115
116
117 class ManimDirective(Directive):
118 r"""The manim directive, rendering videos while building
119 the documentation.
120
121 See the module docstring for documentation.
122 """
123 has_content = True
124 required_arguments = 1
125 optional_arguments = 0
126 option_spec = {
127 "hide_source": bool,
128 "quality": lambda arg: directives.choice(
129 arg, ("low", "medium", "high", "fourk")
130 ),
131 "save_as_gif": bool,
132 "save_last_frame": bool,
133 "ref_modules": lambda arg: process_name_list(arg, "mod"),
134 "ref_classes": lambda arg: process_name_list(arg, "class"),
135 "ref_functions": lambda arg: process_name_list(arg, "func"),
136 }
137 final_argument_whitespace = True
138
139 def run(self):
140 if "skip-manim" in self.state.document.settings.env.app.builder.tags.tags:
141 node = skip_manim_node()
142 self.state.nested_parse(
143 StringList(self.content[0]), self.content_offset, node
144 )
145 return [node]
146
147 from manim import config
148
149 global classnamedict
150
151 clsname = self.arguments[0]
152 if clsname not in classnamedict:
153 classnamedict[clsname] = 1
154 else:
155 classnamedict[clsname] += 1
156
157 hide_source = "hide_source" in self.options
158 save_as_gif = "save_as_gif" in self.options
159 save_last_frame = "save_last_frame" in self.options
160 assert not (save_as_gif and save_last_frame)
161
162 ref_content = (
163 self.options.get("ref_modules", [])
164 + self.options.get("ref_classes", [])
165 + self.options.get("ref_functions", [])
166 )
167 if ref_content:
168 ref_block = f"""
169 .. admonition:: Example References
170 :class: example-reference
171
172 {' '.join(ref_content)}"""
173 else:
174 ref_block = ""
175
176 if "quality" in self.options:
177 quality = f'{self.options["quality"]}_quality'
178 else:
179 quality = "example_quality"
180 frame_rate = QUALITIES[quality]["frame_rate"]
181 pixel_height = QUALITIES[quality]["pixel_height"]
182 pixel_width = QUALITIES[quality]["pixel_width"]
183 qualitydir = f"{pixel_height}p{frame_rate}"
184
185 state_machine = self.state_machine
186 document = state_machine.document
187
188 source_file_name = document.attributes["source"]
189 source_rel_name = relpath(source_file_name, setup.confdir)
190 source_rel_dir = os.path.dirname(source_rel_name)
191 while source_rel_dir.startswith(os.path.sep):
192 source_rel_dir = source_rel_dir[1:]
193
194 dest_dir = os.path.abspath(
195 os.path.join(setup.app.builder.outdir, source_rel_dir)
196 )
197 if not os.path.exists(dest_dir):
198 os.makedirs(dest_dir)
199
200 source_block = [
201 ".. code-block:: python",
202 "",
203 *[" " + line for line in self.content],
204 ]
205 source_block = "\n".join(source_block)
206
207 config.media_dir = Path(setup.confdir) / "media"
208 config.images_dir = "{media_dir}/images"
209 config.video_dir = "{media_dir}/videos/{quality}"
210 output_file = f"{clsname}-{classnamedict[clsname]}"
211 config.assets_dir = Path("_static")
212
213 config_code = [
214 f'config["frame_rate"] = {frame_rate}',
215 f'config["pixel_height"] = {pixel_height}',
216 f'config["pixel_width"] = {pixel_width}',
217 f'config["save_last_frame"] = {save_last_frame}',
218 f'config["save_as_gif"] = {save_as_gif}',
219 f'config["output_file"] = r"{output_file}"',
220 ]
221
222 user_code = self.content
223 if user_code[0].startswith(">>> "): # check whether block comes from doctest
224 user_code = [
225 line[4:] for line in user_code if line.startswith((">>> ", "... "))
226 ]
227
228 code = [
229 "from manim import *",
230 *config_code,
231 *user_code,
232 f"{clsname}().render()",
233 ]
234 exec("\n".join(code), globals())
235
236 # copy video file to output directory
237 if not (save_as_gif or save_last_frame):
238 filename = f"{output_file}.mp4"
239 filesrc = config.get_dir("video_dir") / filename
240 destfile = os.path.join(dest_dir, filename)
241 shutil.copyfile(filesrc, destfile)
242 elif save_as_gif:
243 filename = f"{output_file}.gif"
244 filesrc = config.get_dir("video_dir") / filename
245 elif save_last_frame:
246 filename = f"{output_file}.png"
247 filesrc = config.get_dir("images_dir") / filename
248 else:
249 raise ValueError("Invalid combination of render flags received.")
250
251 rendered_template = jinja2.Template(TEMPLATE).render(
252 clsname=clsname,
253 clsname_lowercase=clsname.lower(),
254 hide_source=hide_source,
255 filesrc_rel=os.path.relpath(filesrc, setup.confdir),
256 output_file=output_file,
257 save_last_frame=save_last_frame,
258 save_as_gif=save_as_gif,
259 source_block=source_block,
260 ref_block=ref_block,
261 )
262 state_machine.insert_input(
263 rendered_template.split("\n"), source=document.attributes["source"]
264 )
265
266 return []
267
268
269 def setup(app):
270 import manim
271
272 app.add_node(skip_manim_node, html=(visit, depart))
273
274 setup.app = app
275 setup.config = app.config
276 setup.confdir = app.confdir
277
278 app.add_directive("manim", ManimDirective)
279
280 metadata = {"parallel_read_safe": False, "parallel_write_safe": True}
281 return metadata
282
283
284 TEMPLATE = r"""
285 {% if not hide_source %}
286 .. raw:: html
287
288 <div class="manim-example">
289
290 {% endif %}
291
292 {% if not (save_as_gif or save_last_frame) %}
293 .. raw:: html
294
295 <video id="{{ clsname_lowercase }}" class="manim-video" controls loop autoplay src="./{{ output_file }}.mp4"></video>
296 {% elif save_as_gif %}
297 .. image:: /{{ filesrc_rel }}
298 :align: center
299 :name: {{ clsname_lowercase }}
300 {% elif save_last_frame %}
301 .. image:: /{{ filesrc_rel }}
302 :align: center
303 :name: {{ clsname_lowercase }}
304 {% endif %}
305 {% if not hide_source %}
306 .. raw:: html
307
308 <h5 class="example-header">{{ clsname }}<a class="headerlink" href="#{{ clsname_lowercase }}">¶</a></h5>
309
310 {{ source_block }}
311 {{ ref_block }}
312 {% endif %}
313
314 .. raw:: html
315
316 </div>
317 """
318
[end of docs/source/manim_directive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py
--- a/docs/source/manim_directive.py
+++ b/docs/source/manim_directive.py
@@ -64,7 +64,11 @@
rendered in a reference block after the source code.
ref_functions
- A list of functions and methods, separated by spaces,
+ A list of functions, separated by spaces,
+ that is rendered in a reference block after the source code.
+
+ ref_methods
+ A list of methods, separated by spaces,
that is rendered in a reference block after the source code.
"""
@@ -133,6 +137,7 @@
"ref_modules": lambda arg: process_name_list(arg, "mod"),
"ref_classes": lambda arg: process_name_list(arg, "class"),
"ref_functions": lambda arg: process_name_list(arg, "func"),
+ "ref_methods": lambda arg: process_name_list(arg, "meth"),
}
final_argument_whitespace = True
@@ -163,6 +168,7 @@
self.options.get("ref_modules", [])
+ self.options.get("ref_classes", [])
+ self.options.get("ref_functions", [])
+ + self.options.get("ref_methods", [])
)
if ref_content:
ref_block = f"""
| {"golden_diff": "diff --git a/docs/source/manim_directive.py b/docs/source/manim_directive.py\n--- a/docs/source/manim_directive.py\n+++ b/docs/source/manim_directive.py\n@@ -64,7 +64,11 @@\n rendered in a reference block after the source code.\n \n ref_functions\n- A list of functions and methods, separated by spaces,\n+ A list of functions, separated by spaces,\n+ that is rendered in a reference block after the source code.\n+\n+ ref_methods\n+ A list of methods, separated by spaces,\n that is rendered in a reference block after the source code.\n \n \"\"\"\n@@ -133,6 +137,7 @@\n \"ref_modules\": lambda arg: process_name_list(arg, \"mod\"),\n \"ref_classes\": lambda arg: process_name_list(arg, \"class\"),\n \"ref_functions\": lambda arg: process_name_list(arg, \"func\"),\n+ \"ref_methods\": lambda arg: process_name_list(arg, \"meth\"),\n }\n final_argument_whitespace = True\n \n@@ -163,6 +168,7 @@\n self.options.get(\"ref_modules\", [])\n + self.options.get(\"ref_classes\", [])\n + self.options.get(\"ref_functions\", [])\n+ + self.options.get(\"ref_methods\", [])\n )\n if ref_content:\n ref_block = f\"\"\"\n", "issue": "ManimDirective needs a `ref_methods`\nSome of the current documentation uses `:ref_functions:` in the example documentation which is supposed to link to the proper function; however, it is often the case that the function is actually a method. In these instances, a ref_methods would be appropriate to have, and the usage of this in the examples should be updated accordingly.\r\n\r\nSee https://github.com/ManimCommunity/manim/blob/master/docs/source/manim_directive.py#L133-L135\n", "before_files": [{"content": "r\"\"\"\nA directive for including Manim videos in a Sphinx document\n===========================================================\n\nWhen rendering the HTML documentation, the ``.. manim::`` directive\nimplemented here allows to include rendered videos.\n\nIts basic usage that allows processing **inline content**\nlooks as follows::\n\n .. manim:: MyScene\n\n class MyScene(Scene):\n def construct(self):\n ...\n\nIt is required to pass the name of the class representing the\nscene to be rendered to the directive.\n\nAs a second application, the directive can also be used to\nrender scenes that are defined within doctests, for example::\n\n .. manim:: DirectiveDoctestExample\n :ref_classes: Dot\n\n >>> dot = Dot(color=RED)\n >>> dot.color\n <Color #fc6255>\n >>> class DirectiveDoctestExample(Scene):\n ... def construct(self):\n ... self.play(Create(dot))\n\n\nOptions\n-------\n\nOptions can be passed as follows::\n\n .. 
manim:: <Class name>\n :<option name>: <value>\n\nThe following configuration options are supported by the\ndirective:\n\n hide_source\n If this flag is present without argument,\n the source code is not displayed above the rendered video.\n\n quality : {'low', 'medium', 'high', 'fourk'}\n Controls render quality of the video, in analogy to\n the corresponding command line flags.\n\n save_as_gif\n If this flag is present without argument,\n the scene is rendered as a gif.\n\n save_last_frame\n If this flag is present without argument,\n an image representing the last frame of the scene will\n be rendered and displayed, instead of a video.\n\n ref_classes\n A list of classes, separated by spaces, that is\n rendered in a reference block after the source code.\n\n ref_functions\n A list of functions and methods, separated by spaces,\n that is rendered in a reference block after the source code.\n\n\"\"\"\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives, Directive\nfrom docutils.statemachine import StringList\n\nimport jinja2\nimport os\nfrom os.path import relpath\nfrom pathlib import Path\nfrom typing import List\n\nimport shutil\n\nfrom manim import QUALITIES\n\nclassnamedict = {}\n\n\nclass skip_manim_node(nodes.Admonition, nodes.Element):\n pass\n\n\ndef visit(self, node, name=\"\"):\n self.visit_admonition(node, name)\n\n\ndef depart(self, node):\n self.depart_admonition(node)\n\n\ndef process_name_list(option_input: str, reference_type: str) -> List[str]:\n r\"\"\"Reformats a string of space separated class names\n as a list of strings containing valid Sphinx references.\n\n Tests\n -----\n\n ::\n\n >>> process_name_list(\"Tex TexTemplate\", \"class\")\n [\":class:`~.Tex`\", \":class:`~.TexTemplate`\"]\n >>> process_name_list(\"Scene.play Mobject.rotate\", \"func\")\n [\":func:`~.Scene.play`\", \":func:`~.Mobject.rotate`\"]\n \"\"\"\n return [f\":{reference_type}:`~.{name}`\" for name in option_input.split()]\n\n\nclass ManimDirective(Directive):\n r\"\"\"The manim directive, rendering videos while building\n the documentation.\n\n See the module docstring for documentation.\n \"\"\"\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n option_spec = {\n \"hide_source\": bool,\n \"quality\": lambda arg: directives.choice(\n arg, (\"low\", \"medium\", \"high\", \"fourk\")\n ),\n \"save_as_gif\": bool,\n \"save_last_frame\": bool,\n \"ref_modules\": lambda arg: process_name_list(arg, \"mod\"),\n \"ref_classes\": lambda arg: process_name_list(arg, \"class\"),\n \"ref_functions\": lambda arg: process_name_list(arg, \"func\"),\n }\n final_argument_whitespace = True\n\n def run(self):\n if \"skip-manim\" in self.state.document.settings.env.app.builder.tags.tags:\n node = skip_manim_node()\n self.state.nested_parse(\n StringList(self.content[0]), self.content_offset, node\n )\n return [node]\n\n from manim import config\n\n global classnamedict\n\n clsname = self.arguments[0]\n if clsname not in classnamedict:\n classnamedict[clsname] = 1\n else:\n classnamedict[clsname] += 1\n\n hide_source = \"hide_source\" in self.options\n save_as_gif = \"save_as_gif\" in self.options\n save_last_frame = \"save_last_frame\" in self.options\n assert not (save_as_gif and save_last_frame)\n\n ref_content = (\n self.options.get(\"ref_modules\", [])\n + self.options.get(\"ref_classes\", [])\n + self.options.get(\"ref_functions\", [])\n )\n if ref_content:\n ref_block = f\"\"\"\n.. 
admonition:: Example References\n :class: example-reference\n\n {' '.join(ref_content)}\"\"\"\n else:\n ref_block = \"\"\n\n if \"quality\" in self.options:\n quality = f'{self.options[\"quality\"]}_quality'\n else:\n quality = \"example_quality\"\n frame_rate = QUALITIES[quality][\"frame_rate\"]\n pixel_height = QUALITIES[quality][\"pixel_height\"]\n pixel_width = QUALITIES[quality][\"pixel_width\"]\n qualitydir = f\"{pixel_height}p{frame_rate}\"\n\n state_machine = self.state_machine\n document = state_machine.document\n\n source_file_name = document.attributes[\"source\"]\n source_rel_name = relpath(source_file_name, setup.confdir)\n source_rel_dir = os.path.dirname(source_rel_name)\n while source_rel_dir.startswith(os.path.sep):\n source_rel_dir = source_rel_dir[1:]\n\n dest_dir = os.path.abspath(\n os.path.join(setup.app.builder.outdir, source_rel_dir)\n )\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n source_block = [\n \".. code-block:: python\",\n \"\",\n *[\" \" + line for line in self.content],\n ]\n source_block = \"\\n\".join(source_block)\n\n config.media_dir = Path(setup.confdir) / \"media\"\n config.images_dir = \"{media_dir}/images\"\n config.video_dir = \"{media_dir}/videos/{quality}\"\n output_file = f\"{clsname}-{classnamedict[clsname]}\"\n config.assets_dir = Path(\"_static\")\n\n config_code = [\n f'config[\"frame_rate\"] = {frame_rate}',\n f'config[\"pixel_height\"] = {pixel_height}',\n f'config[\"pixel_width\"] = {pixel_width}',\n f'config[\"save_last_frame\"] = {save_last_frame}',\n f'config[\"save_as_gif\"] = {save_as_gif}',\n f'config[\"output_file\"] = r\"{output_file}\"',\n ]\n\n user_code = self.content\n if user_code[0].startswith(\">>> \"): # check whether block comes from doctest\n user_code = [\n line[4:] for line in user_code if line.startswith((\">>> \", \"... \"))\n ]\n\n code = [\n \"from manim import *\",\n *config_code,\n *user_code,\n f\"{clsname}().render()\",\n ]\n exec(\"\\n\".join(code), globals())\n\n # copy video file to output directory\n if not (save_as_gif or save_last_frame):\n filename = f\"{output_file}.mp4\"\n filesrc = config.get_dir(\"video_dir\") / filename\n destfile = os.path.join(dest_dir, filename)\n shutil.copyfile(filesrc, destfile)\n elif save_as_gif:\n filename = f\"{output_file}.gif\"\n filesrc = config.get_dir(\"video_dir\") / filename\n elif save_last_frame:\n filename = f\"{output_file}.png\"\n filesrc = config.get_dir(\"images_dir\") / filename\n else:\n raise ValueError(\"Invalid combination of render flags received.\")\n\n rendered_template = jinja2.Template(TEMPLATE).render(\n clsname=clsname,\n clsname_lowercase=clsname.lower(),\n hide_source=hide_source,\n filesrc_rel=os.path.relpath(filesrc, setup.confdir),\n output_file=output_file,\n save_last_frame=save_last_frame,\n save_as_gif=save_as_gif,\n source_block=source_block,\n ref_block=ref_block,\n )\n state_machine.insert_input(\n rendered_template.split(\"\\n\"), source=document.attributes[\"source\"]\n )\n\n return []\n\n\ndef setup(app):\n import manim\n\n app.add_node(skip_manim_node, html=(visit, depart))\n\n setup.app = app\n setup.config = app.config\n setup.confdir = app.confdir\n\n app.add_directive(\"manim\", ManimDirective)\n\n metadata = {\"parallel_read_safe\": False, \"parallel_write_safe\": True}\n return metadata\n\n\nTEMPLATE = r\"\"\"\n{% if not hide_source %}\n.. raw:: html\n\n <div class=\"manim-example\">\n\n{% endif %}\n\n{% if not (save_as_gif or save_last_frame) %}\n.. 
raw:: html\n\n <video id=\"{{ clsname_lowercase }}\" class=\"manim-video\" controls loop autoplay src=\"./{{ output_file }}.mp4\"></video>\n{% elif save_as_gif %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% elif save_last_frame %}\n.. image:: /{{ filesrc_rel }}\n :align: center\n :name: {{ clsname_lowercase }}\n{% endif %}\n{% if not hide_source %}\n.. raw:: html\n\n <h5 class=\"example-header\">{{ clsname }}<a class=\"headerlink\" href=\"#{{ clsname_lowercase }}\">\u00b6</a></h5>\n\n{{ source_block }}\n{{ ref_block }}\n{% endif %}\n\n.. raw:: html\n\n </div>\n\"\"\"\n", "path": "docs/source/manim_directive.py"}]} | 3,673 | 294 |
gh_patches_debug_19972 | rasdani/github-patches | git_diff | CTFd__CTFd-1059 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature Request: Search users by IP address
It's really hard to search for an IP address on the UI now.
It'd be nice to have a search feature in `admin/users` to search for a user by IP address. CTFd already logs the IP addresses of each login, so it'll be very useful when wanting to ban certain users for abusing rules.
</issue>
<code>
[start of CTFd/admin/users.py]
1 from flask import render_template, request
2 from CTFd.utils import get_config
3 from CTFd.utils.decorators import admins_only
4 from CTFd.utils.modes import TEAMS_MODE
5 from CTFd.models import db, Users, Challenges, Tracking
6 from CTFd.admin import admin
7 from CTFd.utils.helpers import get_errors
8
9 from sqlalchemy.sql import not_
10
11
12 @admin.route("/admin/users")
13 @admins_only
14 def users_listing():
15 page = abs(request.args.get("page", 1, type=int))
16 q = request.args.get("q")
17 if q:
18 field = request.args.get("field")
19 users = []
20 errors = get_errors()
21 if field == "id":
22 if q.isnumeric():
23 users = Users.query.filter(Users.id == q).order_by(Users.id.asc()).all()
24 else:
25 users = []
26 errors.append("Your ID search term is not numeric")
27 elif field == "name":
28 users = (
29 Users.query.filter(Users.name.like("%{}%".format(q)))
30 .order_by(Users.id.asc())
31 .all()
32 )
33 elif field == "email":
34 users = (
35 Users.query.filter(Users.email.like("%{}%".format(q)))
36 .order_by(Users.id.asc())
37 .all()
38 )
39 elif field == "affiliation":
40 users = (
41 Users.query.filter(Users.affiliation.like("%{}%".format(q)))
42 .order_by(Users.id.asc())
43 .all()
44 )
45 return render_template(
46 "admin/users/users.html",
47 users=users,
48 pages=None,
49 curr_page=None,
50 q=q,
51 field=field,
52 )
53
54 page = abs(int(page))
55 results_per_page = 50
56 page_start = results_per_page * (page - 1)
57 page_end = results_per_page * (page - 1) + results_per_page
58
59 users = Users.query.order_by(Users.id.asc()).slice(page_start, page_end).all()
60 count = db.session.query(db.func.count(Users.id)).first()[0]
61 pages = int(count / results_per_page) + (count % results_per_page > 0)
62
63 return render_template(
64 "admin/users/users.html", users=users, pages=pages, curr_page=page
65 )
66
67
68 @admin.route("/admin/users/new")
69 @admins_only
70 def users_new():
71 return render_template("admin/users/new.html")
72
73
74 @admin.route("/admin/users/<int:user_id>")
75 @admins_only
76 def users_detail(user_id):
77 # Get user object
78 user = Users.query.filter_by(id=user_id).first_or_404()
79
80 # Get the user's solves
81 solves = user.get_solves(admin=True)
82
83 # Get challenges that the user is missing
84 if get_config("user_mode") == TEAMS_MODE:
85 if user.team:
86 all_solves = user.team.get_solves(admin=True)
87 else:
88 all_solves = user.get_solves(admin=True)
89 else:
90 all_solves = user.get_solves(admin=True)
91
92 solve_ids = [s.challenge_id for s in all_solves]
93 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
94
95 # Get IP addresses that the User has used
96 addrs = (
97 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()
98 )
99
100 # Get Fails
101 fails = user.get_fails(admin=True)
102
103 # Get Awards
104 awards = user.get_awards(admin=True)
105
106 # Get user properties
107 score = user.get_score(admin=True)
108 place = user.get_place(admin=True)
109
110 return render_template(
111 "admin/users/user.html",
112 solves=solves,
113 user=user,
114 addrs=addrs,
115 score=score,
116 missing=missing,
117 place=place,
118 fails=fails,
119 awards=awards,
120 )
121
[end of CTFd/admin/users.py]
[start of CTFd/admin/teams.py]
1 from flask import render_template, request
2 from CTFd.utils.decorators import admins_only
3 from CTFd.models import db, Teams, Challenges, Tracking
4 from CTFd.admin import admin
5 from CTFd.utils.helpers import get_errors
6
7 from sqlalchemy.sql import not_
8
9
10 @admin.route("/admin/teams")
11 @admins_only
12 def teams_listing():
13 page = abs(request.args.get("page", 1, type=int))
14 q = request.args.get("q")
15 if q:
16 field = request.args.get("field")
17 teams = []
18 errors = get_errors()
19 if field == "id":
20 if q.isnumeric():
21 teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()
22 else:
23 teams = []
24 errors.append("Your ID search term is not numeric")
25 elif field == "name":
26 teams = (
27 Teams.query.filter(Teams.name.like("%{}%".format(q)))
28 .order_by(Teams.id.asc())
29 .all()
30 )
31 elif field == "email":
32 teams = (
33 Teams.query.filter(Teams.email.like("%{}%".format(q)))
34 .order_by(Teams.id.asc())
35 .all()
36 )
37 elif field == "affiliation":
38 teams = (
39 Teams.query.filter(Teams.affiliation.like("%{}%".format(q)))
40 .order_by(Teams.id.asc())
41 .all()
42 )
43 return render_template(
44 "admin/teams/teams.html",
45 teams=teams,
46 pages=None,
47 curr_page=None,
48 q=q,
49 field=field,
50 )
51
52 page = abs(int(page))
53 results_per_page = 50
54 page_start = results_per_page * (page - 1)
55 page_end = results_per_page * (page - 1) + results_per_page
56
57 teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()
58 count = db.session.query(db.func.count(Teams.id)).first()[0]
59 pages = int(count / results_per_page) + (count % results_per_page > 0)
60 return render_template(
61 "admin/teams/teams.html", teams=teams, pages=pages, curr_page=page
62 )
63
64
65 @admin.route("/admin/teams/new")
66 @admins_only
67 def teams_new():
68 return render_template("admin/teams/new.html")
69
70
71 @admin.route("/admin/teams/<int:team_id>")
72 @admins_only
73 def teams_detail(team_id):
74 team = Teams.query.filter_by(id=team_id).first_or_404()
75
76 # Get members
77 members = team.members
78 member_ids = [member.id for member in members]
79
80 # Get Solves for all members
81 solves = team.get_solves(admin=True)
82 fails = team.get_fails(admin=True)
83 awards = team.get_awards(admin=True)
84 score = team.get_score(admin=True)
85 place = team.get_place(admin=True)
86
87 # Get missing Challenges for all members
88 # TODO: How do you mark a missing challenge for a team?
89 solve_ids = [s.challenge_id for s in solves]
90 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
91
92 # Get addresses for all members
93 addrs = (
94 Tracking.query.filter(Tracking.user_id.in_(member_ids))
95 .order_by(Tracking.date.desc())
96 .all()
97 )
98
99 return render_template(
100 "admin/teams/team.html",
101 team=team,
102 members=members,
103 score=score,
104 place=place,
105 solves=solves,
106 fails=fails,
107 missing=missing,
108 awards=awards,
109 addrs=addrs,
110 )
111
[end of CTFd/admin/teams.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py
--- a/CTFd/admin/teams.py
+++ b/CTFd/admin/teams.py
@@ -43,7 +43,7 @@
return render_template(
"admin/teams/teams.html",
teams=teams,
- pages=None,
+ pages=0,
curr_page=None,
q=q,
field=field,
diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py
--- a/CTFd/admin/users.py
+++ b/CTFd/admin/users.py
@@ -42,10 +42,18 @@
.order_by(Users.id.asc())
.all()
)
+ elif field == "ip":
+ users = (
+ Users.query.join(Tracking, Users.id == Tracking.user_id)
+ .filter(Tracking.ip.like("%{}%".format(q)))
+ .order_by(Users.id.asc())
+ .all()
+ )
+
return render_template(
"admin/users/users.html",
users=users,
- pages=None,
+ pages=0,
curr_page=None,
q=q,
field=field,
| {"golden_diff": "diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py\n--- a/CTFd/admin/teams.py\n+++ b/CTFd/admin/teams.py\n@@ -43,7 +43,7 @@\n return render_template(\n \"admin/teams/teams.html\",\n teams=teams,\n- pages=None,\n+ pages=0,\n curr_page=None,\n q=q,\n field=field,\ndiff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -42,10 +42,18 @@\n .order_by(Users.id.asc())\n .all()\n )\n+ elif field == \"ip\":\n+ users = (\n+ Users.query.join(Tracking, Users.id == Tracking.user_id)\n+ .filter(Tracking.ip.like(\"%{}%\".format(q)))\n+ .order_by(Users.id.asc())\n+ .all()\n+ )\n+\n return render_template(\n \"admin/users/users.html\",\n users=users,\n- pages=None,\n+ pages=0,\n curr_page=None,\n q=q,\n field=field,\n", "issue": "Feature Request : Search users by IP address\nIt's really hard to search an IP address on the UI now.\r\n\r\nIt'd be nice to have a search feature in `admin/users` search a user by an IP address. CTFd already logs IP addresses of each login so it'll be very useful when wanting to ban certain users for abusing rules.\r\n\n", "before_files": [{"content": "from flask import render_template, request\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\nfrom CTFd.models import db, Users, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n users = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n users = Users.query.filter(Users.id == q).order_by(Users.id.asc()).all()\n else:\n users = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n users = (\n Users.query.filter(Users.name.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"email\":\n users = (\n Users.query.filter(Users.email.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n users = (\n Users.query.filter(Users.affiliation.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .all()\n )\n return render_template(\n \"admin/users/users.html\",\n users=users,\n pages=None,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n users = Users.query.order_by(Users.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Users.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n\n return render_template(\n \"admin/users/users.html\", users=users, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n 
all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.get_score(admin=True)\n place = user.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}, {"content": "from flask import render_template, request\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.models import db, Teams, Challenges, Tracking\nfrom CTFd.admin import admin\nfrom CTFd.utils.helpers import get_errors\n\nfrom sqlalchemy.sql import not_\n\n\[email protected](\"/admin/teams\")\n@admins_only\ndef teams_listing():\n page = abs(request.args.get(\"page\", 1, type=int))\n q = request.args.get(\"q\")\n if q:\n field = request.args.get(\"field\")\n teams = []\n errors = get_errors()\n if field == \"id\":\n if q.isnumeric():\n teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()\n else:\n teams = []\n errors.append(\"Your ID search term is not numeric\")\n elif field == \"name\":\n teams = (\n Teams.query.filter(Teams.name.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"email\":\n teams = (\n Teams.query.filter(Teams.email.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n elif field == \"affiliation\":\n teams = (\n Teams.query.filter(Teams.affiliation.like(\"%{}%\".format(q)))\n .order_by(Teams.id.asc())\n .all()\n )\n return render_template(\n \"admin/teams/teams.html\",\n teams=teams,\n pages=None,\n curr_page=None,\n q=q,\n field=field,\n )\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Teams.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template(\n \"admin/teams/teams.html\", teams=teams, pages=pages, curr_page=page\n )\n\n\[email protected](\"/admin/teams/new\")\n@admins_only\ndef teams_new():\n return render_template(\"admin/teams/new.html\")\n\n\[email protected](\"/admin/teams/<int:team_id>\")\n@admins_only\ndef teams_detail(team_id):\n team = Teams.query.filter_by(id=team_id).first_or_404()\n\n # Get members\n members = team.members\n member_ids = [member.id for member in members]\n\n # Get Solves for all members\n solves = team.get_solves(admin=True)\n fails = team.get_fails(admin=True)\n awards = team.get_awards(admin=True)\n score = team.get_score(admin=True)\n place = team.get_place(admin=True)\n\n # Get missing Challenges for all members\n # TODO: How do you mark a missing challenge for a team?\n solve_ids = [s.challenge_id for s in solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get addresses for all members\n addrs = (\n Tracking.query.filter(Tracking.user_id.in_(member_ids))\n .order_by(Tracking.date.desc())\n .all()\n )\n\n return render_template(\n \"admin/teams/team.html\",\n team=team,\n members=members,\n 
score=score,\n place=place,\n solves=solves,\n fails=fails,\n missing=missing,\n awards=awards,\n addrs=addrs,\n )\n", "path": "CTFd/admin/teams.py"}]} | 2,769 | 265 |
gh_patches_debug_1305 | rasdani/github-patches | git_diff | oppia__oppia-7459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade @typescript-eslint/eslint-plugin
`eslint-utils` is currently out of date, https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long term fix.
When fixing this, please make sure that the lint tests run successfully.
</issue>
<code>
[start of core/domain/feedback_jobs_one_off.py]
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """One-off jobs for feedback models."""
16
17 from core import jobs
18 from core.platform import models
19
20 (feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
21
22
23 class GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):
24 """One-off job for setting user_id and thread_id for all
25 GeneralFeedbackThreadUserModels.
26 """
27 @classmethod
28 def entity_classes_to_map_over(cls):
29 """Return a list of datastore class references to map over."""
30 return [feedback_models.GeneralFeedbackThreadUserModel]
31
32 @staticmethod
33 def map(model_instance):
34 """Implements the map function for this job."""
35 user_id, thread_id = model_instance.id.split('.', 1)
36 if model_instance.user_id is None:
37 model_instance.user_id = user_id
38 if model_instance.thread_id is None:
39 model_instance.thread_id = thread_id
40 model_instance.put(update_last_updated_time=False)
41 yield ('SUCCESS', model_instance.id)
42
43 @staticmethod
44 def reduce(key, values):
45 yield (key, len(values))
46
[end of core/domain/feedback_jobs_one_off.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py
--- a/core/domain/feedback_jobs_one_off.py
+++ b/core/domain/feedback_jobs_one_off.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""One-off jobs for feedback models."""
+from __future__ import absolute_import # pylint: disable=import-only-modules
from core import jobs
from core.platform import models
| {"golden_diff": "diff --git a/core/domain/feedback_jobs_one_off.py b/core/domain/feedback_jobs_one_off.py\n--- a/core/domain/feedback_jobs_one_off.py\n+++ b/core/domain/feedback_jobs_one_off.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n \"\"\"One-off jobs for feedback models.\"\"\"\n+from __future__ import absolute_import # pylint: disable=import-only-modules\n \n from core import jobs\n from core.platform import models\n", "issue": "Upgrade @typescript-eslint/eslint-plugin\n`eslint-utils` is currently out of date, https://github.com/oppia/oppia/pull/7451 provides a temporary fix, but we need to upgrade the main package that requires `eslint-utils` to ensure that we have a long term fix. \r\n\r\nWhen fixing this, please make sure that the lint tests run successfully.\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"One-off jobs for feedback models.\"\"\"\n\nfrom core import jobs\nfrom core.platform import models\n\n(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])\n\n\nclass GeneralFeedbackThreadUserOneOffJob(jobs.BaseMapReduceOneOffJobManager):\n \"\"\"One-off job for setting user_id and thread_id for all\n GeneralFeedbackThreadUserModels.\n \"\"\"\n @classmethod\n def entity_classes_to_map_over(cls):\n \"\"\"Return a list of datastore class references to map over.\"\"\"\n return [feedback_models.GeneralFeedbackThreadUserModel]\n\n @staticmethod\n def map(model_instance):\n \"\"\"Implements the map function for this job.\"\"\"\n user_id, thread_id = model_instance.id.split('.', 1)\n if model_instance.user_id is None:\n model_instance.user_id = user_id\n if model_instance.thread_id is None:\n model_instance.thread_id = thread_id\n model_instance.put(update_last_updated_time=False)\n yield ('SUCCESS', model_instance.id)\n\n @staticmethod\n def reduce(key, values):\n yield (key, len(values))\n", "path": "core/domain/feedback_jobs_one_off.py"}]} | 1,069 | 99 |
gh_patches_debug_21117 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-614 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CCA failure when enabled
**Describe the bug**
The CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. The expected behavior is a likely failure due to this issue.
**To Reproduce**
Run a segmentation pipeline with CCA enabled for the post-processing.
**Expected behavior**
The CCA function should be corrected and integrated with the segmentation pipeline to work correctly and tested
**GaNDLF Version**
<!-- Put the output of the following command:
python -c 'import GANDLF as g;print(g.__version__)'
-->
Version information of the GaNDLF package in the virtual environment. 0.0.16-dev
**Desktop (please complete the following information):**
- OS: Linux, Ubuntu
- Version (including Build information, if any): 22.04
**Additional context**
None
</issue>
<code>
[start of GANDLF/data/post_process/morphology.py]
1 import torch
2 import torch.nn.functional as F
3 from skimage.measure import label
4 import numpy as np
5 from scipy.ndimage import binary_fill_holes, binary_closing
6 from GANDLF.utils.generic import get_array_from_image_or_tensor
7
8
9 def torch_morphological(input_image, kernel_size=1, mode="dilation"):
10 """
11 This function enables morphological operations using torch. Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.
12
13 Args:
14 input_image (torch.Tensor): The input image.
15 kernel_size (list): The size of the window to take a max over.
16 mode (str): The type of morphological operation to perform.
17
18 Returns:
19 torch.Tensor: The output image after morphological operations.
20 """
21
22 if len(input_image.shape) == 4:
23 max_pool = F.max_pool2d
24 elif len(input_image.shape) == 5:
25 max_pool = F.max_pool3d
26 else:
27 raise ValueError("Input image has invalid shape for morphological operations.")
28
29 if mode == "dilation":
30 output_image = max_pool(
31 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
32 )
33 elif mode == "erosion":
34 output_image = -max_pool(
35 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
36 )
37 elif mode == "closing":
38 output_image = max_pool(
39 input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
40 )
41 output_image = -max_pool(
42 -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
43 )
44 elif mode == "opening":
45 output_image = -max_pool(
46 -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
47 )
48 output_image = max_pool(
49 output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2
50 )
51
52 return output_image
53
54
55 def fill_holes(input_image, params=None):
56 """
57 This function fills holes in masks.
58
59 Args:
60 input_image (torch.Tensor): The input image.
61 params (dict): The parameters dict; unused.
62
63 Returns:
64 torch.Tensor: The output image after morphological operations.
65 """
66 input_image_array = get_array_from_image_or_tensor(input_image).astype(int)
67 input_image_array_closed = binary_closing(input_image_array)
68 # Fill the holes in binary objects
69 output_array = binary_fill_holes(input_image_array_closed).astype(int)
70
71 return torch.from_numpy(output_array)
72
73
74 def cca(input_image):
75 """
76 This function performs connected component analysis on the input image.
77
78 Args:
79 input_image (torch.Tensor): The input image.
80 params (dict): The parameters dict;
81
82 Returns:
83 torch.Tensor: The output image after morphological operations.
84 """
85 seg = get_array_from_image_or_tensor(input_image)
86 mask = seg != 0
87
88 connectivity = input_image.dim() - 1
89 labels_connected = label(mask, connectivity=connectivity)
90 labels_connected_sizes = [
91 np.sum(labels_connected == i) for i in np.unique(labels_connected)
92 ]
93 largest_region = np.argmax(labels_connected_sizes[1:]) + 1
94 seg[labels_connected != largest_region] = 0
95 return seg
96
[end of GANDLF/data/post_process/morphology.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py
--- a/GANDLF/data/post_process/morphology.py
+++ b/GANDLF/data/post_process/morphology.py
@@ -71,7 +71,7 @@
return torch.from_numpy(output_array)
-def cca(input_image):
+def cca(input_image, params=None):
"""
This function performs connected component analysis on the input image.
@@ -85,11 +85,15 @@
seg = get_array_from_image_or_tensor(input_image)
mask = seg != 0
- connectivity = input_image.dim() - 1
+ connectivity = input_image.ndim - 1
labels_connected = label(mask, connectivity=connectivity)
labels_connected_sizes = [
np.sum(labels_connected == i) for i in np.unique(labels_connected)
]
- largest_region = np.argmax(labels_connected_sizes[1:]) + 1
+ largest_region = 0
+ if len(labels_connected_sizes) > 1:
+ largest_region = np.argmax(labels_connected_sizes[1:]) + 1
seg[labels_connected != largest_region] = 0
+
return seg
+
| {"golden_diff": "diff --git a/GANDLF/data/post_process/morphology.py b/GANDLF/data/post_process/morphology.py\n--- a/GANDLF/data/post_process/morphology.py\n+++ b/GANDLF/data/post_process/morphology.py\n@@ -71,7 +71,7 @@\n return torch.from_numpy(output_array)\n \n \n-def cca(input_image):\n+def cca(input_image, params=None):\n \"\"\"\n This function performs connected component analysis on the input image.\n \n@@ -85,11 +85,15 @@\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n \n- connectivity = input_image.dim() - 1\n+ connectivity = input_image.ndim - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n- largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n+ largest_region = 0\n+ if len(labels_connected_sizes) > 1:\n+ largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n+\n return seg\n+\n", "issue": "CCA failure when enabled\n**Describe the bug**\r\nThe CCA (Largest Connected Component Analysis) function was implemented as a standalone function, which causes it to fail when called in the segmentation pipeline with post-processing enabled. The expected behavior is a likely failure due to this issue.\r\n\r\n**To Reproduce**\r\nRun a segmentation pipeline with CCA enabled for the post-processing.\r\n\r\n**Expected behavior**\r\nThe CCA function should be corrected and integrated with the segmentation pipeline to work correctly and tested\r\n\r\n**GaNDLF Version**\r\n<!-- Put the output of the following command:\r\npython -c 'import GANDLF as g;print(g.__version__)'\r\n-->\r\nVersion information of the GaNDLF package in the virtual environment. 0.0.16-dev\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux, Ubuntu\r\n - Version (including Build information, if any): 22.04\r\n\r\n**Additional context**\r\nNone\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom skimage.measure import label\nimport numpy as np\nfrom scipy.ndimage import binary_fill_holes, binary_closing\nfrom GANDLF.utils.generic import get_array_from_image_or_tensor\n\n\ndef torch_morphological(input_image, kernel_size=1, mode=\"dilation\"):\n \"\"\"\n This function enables morphological operations using torch. 
Adapted from https://github.com/DIVA-DIA/Generating-Synthetic-Handwritten-Historical-Documents/blob/e6a798dc2b374f338804222747c56cb44869af5b/HTR_ctc/utils/auxilary_functions.py#L10.\n\n Args:\n input_image (torch.Tensor): The input image.\n kernel_size (list): The size of the window to take a max over.\n mode (str): The type of morphological operation to perform.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n\n if len(input_image.shape) == 4:\n max_pool = F.max_pool2d\n elif len(input_image.shape) == 5:\n max_pool = F.max_pool3d\n else:\n raise ValueError(\"Input image has invalid shape for morphological operations.\")\n\n if mode == \"dilation\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"erosion\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"closing\":\n output_image = max_pool(\n input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = -max_pool(\n -output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n elif mode == \"opening\":\n output_image = -max_pool(\n -input_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n output_image = max_pool(\n output_image, kernel_size=kernel_size, stride=1, padding=kernel_size // 2\n )\n\n return output_image\n\n\ndef fill_holes(input_image, params=None):\n \"\"\"\n This function fills holes in masks.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict; unused.\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n input_image_array = get_array_from_image_or_tensor(input_image).astype(int)\n input_image_array_closed = binary_closing(input_image_array)\n # Fill the holes in binary objects\n output_array = binary_fill_holes(input_image_array_closed).astype(int)\n\n return torch.from_numpy(output_array)\n\n\ndef cca(input_image):\n \"\"\"\n This function performs connected component analysis on the input image.\n\n Args:\n input_image (torch.Tensor): The input image.\n params (dict): The parameters dict;\n\n Returns:\n torch.Tensor: The output image after morphological operations.\n \"\"\"\n seg = get_array_from_image_or_tensor(input_image)\n mask = seg != 0\n\n connectivity = input_image.dim() - 1\n labels_connected = label(mask, connectivity=connectivity)\n labels_connected_sizes = [\n np.sum(labels_connected == i) for i in np.unique(labels_connected)\n ]\n largest_region = np.argmax(labels_connected_sizes[1:]) + 1\n seg[labels_connected != largest_region] = 0\n return seg\n", "path": "GANDLF/data/post_process/morphology.py"}]} | 1,728 | 270 |
gh_patches_debug_17211 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-2264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot download p6tyohj.tgz
When I run the command shown at https://parl.ai/#getstarted:
> python examples/eval_model.py -m ir_baseline -t "#moviedd-reddit" -dt valid
It correctly downloads moviedialog.tar.gz, but can't download p6tyohj.tgz, as follows:
```
[ downloading: http://parl.ai/downloads/moviedialog/moviedialog.tar.gz to path/ParlAI/data/MovieDialog/moviedialog.tar.gz ]
Downloading moviedialog.tar.gz: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████| 224M/224M [00:23<00:00, 9.33MB/s]
[ Checksum Successful ]
unpacking moviedialog.tar.gz
[ downloading: http://tinyurl.com/p6tyohj to path/ParlAI/data/MovieDialog/movie_dialog_dataset/task4_reddit/p6tyohj.tgz ]
Connection error, retrying. (4 retries left)
Connection error, retrying. (3 retries left)
Connection error, retrying. (2 retries left)
Connection error, retrying. (1 retries left)
Connection error, retrying. (0 retries left)
Retried too many times, stopped retrying.
Traceback (most recent call last):
File "path/ParlAI/parlai/core/worlds.py", line 1116, in _create_task_agents
task_agents = my_module.create_agents(opt, task)
AttributeError: module 'parlai.tasks.moviedialog.agents' has no attribute 'create_agents'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "examples/eval_model.py", line 17, in <module>
eval_model(opt, print_parser=parser)
File "path/ParlAI/parlai/scripts/eval_model.py", line 117, in eval_model
task_report = _eval_single_world(opt, agent, task)
File "path/ParlAI/parlai/scripts/eval_model.py", line 69, in _eval_single_world
world = create_task(task_opt, agent) # create worlds for tasks
File "path/ParlAI/parlai/core/worlds.py", line 1221, in create_task
world = create_task_world(opt, user_agents, default_world=default_world)
File "path/ParlAI/parlai/core/worlds.py", line 1178, in create_task_world
opt, user_agents, default_world=default_world
File "path/ParlAI/parlai/core/worlds.py", line 1126, in _get_task_world
task_agents = _create_task_agents(opt)
File "path/ParlAI/parlai/core/worlds.py", line 1119, in _create_task_agents
return create_task_agent_from_taskname(opt)
File "path/ParlAI/parlai/core/agents.py", line 837, in create_task_agent_from_taskname
task_agents = teacher_class(opt)
File "path/ParlAI/parlai/tasks/moviedialog/agents.py", line 88, in __init__
opt['datafile'], opt['cands_datafile'] = _path(self.task, opt)
File "path/ParlAI/parlai/tasks/moviedialog/agents.py", line 36, in _path
build(opt)
File "path/ParlAI/parlai/tasks/moviedialog/build.py", line 43, in build
RESOURCES[1].download_file(dpath2)
File "path/ParlAI/parlai/core/build_data.py", line 81, in download_file
download(self.url, dpath, self.file_name)
File "path/ParlAI/parlai/core/build_data.py", line 216, in download
raise RuntimeWarning('Connection broken too many times. Stopped retrying.')
RuntimeWarning: Connection broken too many times. Stopped retrying.
Downloading p6tyohj.tgz: 0.00B [00:31, ?B/s]
```
```python
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',
'moviedialog.tar.gz',
'9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',
),
DownloadableFile(
'http://tinyurl.com/p6tyohj', # maybe this URL is wrong.
'p6tyohj.tgz',
'6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',
),
]
```
</issue>
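For context, a quick way to confirm whether the short link behind the failing download still resolves is to probe it before touching `RESOURCES`. This is a hypothetical sketch using the `requests` library; the mirror URL is a placeholder, not taken from the report:

```python
import requests

urls = [
    "http://tinyurl.com/p6tyohj",            # URL currently listed in RESOURCES
    "http://example.org/task4_reddit.tgz",   # placeholder mirror, assumption only
]
for url in urls:
    try:
        resp = requests.head(url, allow_redirects=True, timeout=10)
        print(url, "->", resp.status_code, resp.url)
    except requests.RequestException as exc:
        print(url, "-> failed:", exc)
```

If the first probe times out or returns a 4xx/5xx status while a mirror responds, that points at the URL itself rather than the download code.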
<code>
[start of parlai/tasks/moviedialog/build.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 # Download and build the data if it does not exist.
7
8 import parlai.core.build_data as build_data
9 import os
10 from parlai.core.build_data import DownloadableFile
11
12 RESOURCES = [
13 DownloadableFile(
14 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',
15 'moviedialog.tar.gz',
16 '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',
17 ),
18 DownloadableFile(
19 'http://tinyurl.com/p6tyohj',
20 'p6tyohj.tgz',
21 '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',
22 ),
23 ]
24
25
26 def build(opt):
27 dpath = os.path.join(opt['datapath'], 'MovieDialog')
28 version = '3'
29
30 if not build_data.built(dpath, version_string=version):
31 print('[building data: ' + dpath + ']')
32 if build_data.built(dpath):
33 # An older version exists, so remove these outdated files.
34 build_data.remove_dir(dpath)
35 build_data.make_dir(dpath)
36
37 # Download the data.
38 RESOURCES[0].download_file(dpath)
39
40 dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit')
41 build_data.make_dir(dpath2)
42
43 RESOURCES[1].download_file(dpath2)
44
45 # remove pipes from task 4 labels, only one label per example
46 for root, _subfolder, files in os.walk(os.path.join(dpath2, 'task4_reddit')):
47 for f in files:
48 if f.endswith('txt'):
49 read_fn = os.path.join(root, f)
50 head = 'task4_reddit_'
51 tail = f[len(head) :]
52 write_fn = os.path.join(root, head + 'pipeless_' + tail)
53 with open(read_fn) as read, open(write_fn, 'w') as write:
54 for line in read:
55 write.write(line.replace('|', ' __PIPE__ ') + '\n')
56
57 # Mark the data as built.
58 build_data.mark_done(dpath, version_string=version)
59
[end of parlai/tasks/moviedialog/build.py]
</code>
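One practical constraint on any fix: `RESOURCES` pins a sha256 checksum for the archive, so whatever URL replaces the tinyurl link has to serve byte-identical content. A minimal, standard-library way to verify a downloaded file against that checksum (the local filename here is an assumption):

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file in chunks and return its hex sha256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb"
print(sha256_of("task4_reddit.tgz") == expected)
```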
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/tasks/moviedialog/build.py b/parlai/tasks/moviedialog/build.py
--- a/parlai/tasks/moviedialog/build.py
+++ b/parlai/tasks/moviedialog/build.py
@@ -16,8 +16,8 @@
'9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',
),
DownloadableFile(
- 'http://tinyurl.com/p6tyohj',
- 'p6tyohj.tgz',
+ 'http://cs.nyu.edu/~xiang/task4_reddit.tgz',
+ 'task4_reddit.tgz',
'6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',
),
]
@@ -25,7 +25,8 @@
def build(opt):
dpath = os.path.join(opt['datapath'], 'MovieDialog')
- version = '3'
+ # 2019-12-11 bump version with changed url
+ version = '3.01'
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
| {"golden_diff": "diff --git a/parlai/tasks/moviedialog/build.py b/parlai/tasks/moviedialog/build.py\n--- a/parlai/tasks/moviedialog/build.py\n+++ b/parlai/tasks/moviedialog/build.py\n@@ -16,8 +16,8 @@\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\n ),\n DownloadableFile(\n- 'http://tinyurl.com/p6tyohj',\n- 'p6tyohj.tgz',\n+ 'http://cs.nyu.edu/~xiang/task4_reddit.tgz',\n+ 'task4_reddit.tgz',\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\n ),\n ]\n@@ -25,7 +25,8 @@\n \n def build(opt):\n dpath = os.path.join(opt['datapath'], 'MovieDialog')\n- version = '3'\n+ # 2019-12-11 bump version with changed url\n+ version = '3.01'\n \n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n", "issue": "Can not download p6tyohj.tgz\n\r\nwhen I run this command shown at https://parl.ai/#getstarted:\r\n\r\n> python examples/eval_model.py -m ir_baseline -t \"#moviedd-reddit\" -dt valid\r\n\r\nIt can correctly downloads moviedialog.tar.gz, but can't downlad p6tyohj.tgz.\r\nas following: \r\n```\r\n[ downloading: http://parl.ai/downloads/moviedialog/moviedialog.tar.gz to path/ParlAI/data/MovieDialog/moviedialog.tar.gz ]\r\nDownloading moviedialog.tar.gz: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 224M/224M [00:23<00:00, 9.33MB/s]\r\n[ Checksum Successful ]\r\nunpacking moviedialog.tar.gz\r\n[ downloading: http://tinyurl.com/p6tyohj to path/ParlAI/data/MovieDialog/movie_dialog_dataset/task4_reddit/p6tyohj.tgz ]\r\nConnection error, retrying. (4 retries left)\r\nConnection error, retrying. (3 retries left)\r\nConnection error, retrying. (2 retries left)\r\nConnection error, retrying. (1 retries left)\r\nConnection error, retrying. 
(0 retries left)\r\nRetried too many times, stopped retrying.\r\nTraceback (most recent call last):\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1116, in _create_task_agents\r\n task_agents = my_module.create_agents(opt, task)\r\nAttributeError: module 'parlai.tasks.moviedialog.agents' has no attribute 'create_agents'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"examples/eval_model.py\", line 17, in <module>\r\n eval_model(opt, print_parser=parser)\r\n File \"path/ParlAI/parlai/scripts/eval_model.py\", line 117, in eval_model\r\n task_report = _eval_single_world(opt, agent, task)\r\n File \"path/ParlAI/parlai/scripts/eval_model.py\", line 69, in _eval_single_world\r\n world = create_task(task_opt, agent) # create worlds for tasks\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1221, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1178, in create_task_world\r\n opt, user_agents, default_world=default_world\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1126, in _get_task_world\r\n task_agents = _create_task_agents(opt)\r\n File \"path/ParlAI/parlai/core/worlds.py\", line 1119, in _create_task_agents\r\n return create_task_agent_from_taskname(opt)\r\n File \"path/ParlAI/parlai/core/agents.py\", line 837, in create_task_agent_from_taskname\r\n task_agents = teacher_class(opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/agents.py\", line 88, in __init__\r\n opt['datafile'], opt['cands_datafile'] = _path(self.task, opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/agents.py\", line 36, in _path\r\n build(opt)\r\n File \"path/ParlAI/parlai/tasks/moviedialog/build.py\", line 43, in build\r\n RESOURCES[1].download_file(dpath2)\r\n File \"path/ParlAI/parlai/core/build_data.py\", line 81, in download_file\r\n download(self.url, dpath, self.file_name)\r\n File \"path/ParlAI/parlai/core/build_data.py\", line 216, in download\r\n raise RuntimeWarning('Connection broken too many times. Stopped retrying.')\r\nRuntimeWarning: Connection broken too many times. Stopped retrying.\r\nDownloading p6tyohj.tgz: 0.00B [00:31, ?B/s]\r\n```\r\n\r\n`\r\nRESOURCES = [\r\n DownloadableFile(\r\n 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',\r\n 'moviedialog.tar.gz',\r\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\r\n ),\r\n DownloadableFile(\r\n 'http://tinyurl.com/p6tyohj', # maybe this URL is wrong.\r\n 'p6tyohj.tgz',\r\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\r\n ),\r\n]\r\n`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\nfrom parlai.core.build_data import DownloadableFile\n\nRESOURCES = [\n DownloadableFile(\n 'http://parl.ai/downloads/moviedialog/moviedialog.tar.gz',\n 'moviedialog.tar.gz',\n '9b168d30111f13b9cc50e6a15885adae8f86bc0bb7a124d435c43fd0f7e2a9c7',\n ),\n DownloadableFile(\n 'http://tinyurl.com/p6tyohj',\n 'p6tyohj.tgz',\n '6316a6a5c563bc3c133a4a1e611d8ca638c61582f331c500697d9090efd215bb',\n ),\n]\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'MovieDialog')\n version = '3'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n if build_data.built(dpath):\n # An older version exists, so remove these outdated files.\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n RESOURCES[0].download_file(dpath)\n\n dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit')\n build_data.make_dir(dpath2)\n\n RESOURCES[1].download_file(dpath2)\n\n # remove pipes from task 4 labels, only one label per example\n for root, _subfolder, files in os.walk(os.path.join(dpath2, 'task4_reddit')):\n for f in files:\n if f.endswith('txt'):\n read_fn = os.path.join(root, f)\n head = 'task4_reddit_'\n tail = f[len(head) :]\n write_fn = os.path.join(root, head + 'pipeless_' + tail)\n with open(read_fn) as read, open(write_fn, 'w') as write:\n for line in read:\n write.write(line.replace('|', ' __PIPE__ ') + '\\n')\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/moviedialog/build.py"}]} | 2,419 | 354 |
gh_patches_debug_42751 | rasdani/github-patches | git_diff | cloudtools__troposphere-1703 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AWS::ImageBuilder::* some wrong data types and a missing attribute
In imagebuilder.py (2.6.1 release).
* In AWS::ImageBuilder::*
"Tags" are "json_checker" but should be dict
When encoded in the structure it becomes a string (validator.py, ligne 258,` json.dumps(prop)`) which is creating an issue with CloudFormation that expect a struct like : `"Tags" : {Key : Value, ...}`
* AWS::ImageBuilder::DistributionConfiguration::Distribution
"AmiDistributionConfiguration" is "json_checker" but should be dict.
For the same as above "Tags"
* In AWS::ImageBuilder::Component
"Data" is missing. And should be "json_checker" in that case.
* In AWS::ImageBuilder::ImageRecipe::InstanceBlockDeviceMapping
"NoDevice" is boolean but should be a string
</issue>
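The core of the complaint is easy to reproduce with nothing but the standard library: if `Tags` is pre-serialized to a JSON string (which is what the `json_checker` path leads to), CloudFormation receives a quoted string instead of an object. A small illustration, independent of troposphere:

```python
import json

tags = {"Environment": "prod", "Team": "imaging"}

# Property stored as a pre-serialized JSON string -> Tags becomes a quoted string:
print(json.dumps({"Tags": json.dumps(tags)}))
# {"Tags": "{\"Environment\": \"prod\", \"Team\": \"imaging\"}"}

# Property stored as a plain dict -> Tags stays a JSON object, as CloudFormation expects:
print(json.dumps({"Tags": tags}))
# {"Tags": {"Environment": "prod", "Team": "imaging"}}
```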
<code>
[start of troposphere/imagebuilder.py]
1 # Copyright (c) 2020, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import (integer, boolean, json_checker,
8 component_platforms, imagepipeline_status,
9 schedule_pipelineexecutionstartcondition,
10 ebsinstanceblockdevicespecification_volume_type)
11
12
13 class S3Logs(AWSProperty):
14 props = {
15 "S3BucketName": (basestring, False),
16 "S3KeyPrefix": (basestring, False),
17 }
18
19
20 class Logging(AWSProperty):
21 props = {
22 'S3Logs': (S3Logs, False),
23 }
24
25
26 class InfrastructureConfiguration(AWSObject):
27 resource_type = "AWS::ImageBuilder::InfrastructureConfiguration"
28
29 props = {
30 'Description': (basestring, False),
31 'InstanceProfileName': (basestring, True),
32 'InstanceTypes': ([basestring], False),
33 'KeyPair': (basestring, False),
34 'Logging': (Logging, False),
35 'Name': (basestring, True),
36 'SecurityGroupIds': ([basestring], False),
37 'SnsTopicArn': (basestring, False),
38 'SubnetId': (basestring, False),
39 'Tags': (json_checker, False),
40 'TerminateInstanceOnFailure': (boolean, False)
41 }
42
43
44 class EbsInstanceBlockDeviceSpecification(AWSProperty):
45 props = {
46 'DeleteOnTermination': (boolean, False),
47 'Encrypted': (boolean, False),
48 'Iops': (integer, False),
49 'KmsKeyId': (basestring, False),
50 'SnapshotId': (basestring, False),
51 'VolumeSize': (integer, False),
52 'VolumeType': (ebsinstanceblockdevicespecification_volume_type, False),
53 }
54
55
56 class InstanceBlockDeviceMapping(AWSProperty):
57 props = {
58 'DeviceName': (basestring, False),
59 'Ebs': (EbsInstanceBlockDeviceSpecification, False),
60 'NoDevice': (boolean, False),
61 'VirtualName': (basestring, False),
62 }
63
64
65 class ComponentConfiguration(AWSProperty):
66 props = {
67 'ComponentArn': (basestring, False),
68 }
69
70
71 class ImageRecipe(AWSObject):
72 resource_type = "AWS::ImageBuilder::ImageRecipe"
73
74 props = {
75 'BlockDeviceMappings': ([InstanceBlockDeviceMapping], False),
76 'Components': ([ComponentConfiguration], True),
77 'Description': (basestring, False),
78 'Name': (basestring, True),
79 'ParentImage': (basestring, True),
80 'Tags': (json_checker, False),
81 'Version': (basestring, True)
82 }
83
84
85 class ImageTestsConfiguration(AWSProperty):
86 props = {
87 'ImageTestsEnabled': (boolean, False),
88 'TimeoutMinutes': (integer, False),
89 }
90
91
92 class Schedule(AWSProperty):
93 props = {
94 'PipelineExecutionStartCondition': (schedule_pipelineexecutionstartcondition, False), # NOQA
95 'ScheduleExpression': (basestring, False),
96 }
97
98
99 class ImagePipeline(AWSObject):
100 resource_type = "AWS::ImageBuilder::ImagePipeline"
101
102 props = {
103 'Description': (basestring, False),
104 'DistributionConfigurationArn': (basestring, False),
105 'ImageRecipeArn': (basestring, True),
106 'ImageTestsConfiguration': (ImageTestsConfiguration, False),
107 'InfrastructureConfigurationArn': (basestring, True),
108 'Name': (basestring, True),
109 'Schedule': (Schedule, False),
110 'Status': (imagepipeline_status, False),
111 'Tags': (json_checker, False),
112 }
113
114
115 class Distribution(AWSProperty):
116 props = {
117 'AmiDistributionConfiguration': (json_checker, False),
118 'LicenseConfigurationArns': ([basestring], False),
119 'Region': (basestring, False),
120 }
121
122
123 class DistributionConfiguration(AWSObject):
124 resource_type = "AWS::ImageBuilder::DistributionConfiguration"
125
126 props = {
127 'Description': (basestring, False),
128 'Distributions': ([Distribution], True),
129 'Name': (basestring, True),
130 'Tags': (json_checker, False),
131 }
132
133
134 class Component(AWSObject):
135 resource_type = "AWS::ImageBuilder::Component"
136
137 props = {
138 'ChangeDescription': (basestring, False),
139 'Description': (basestring, False),
140 'KmsKeyId': (basestring, False),
141 'Name': (basestring, True),
142 'Platform': (component_platforms, True),
143 'Tags': (json_checker, False),
144 'Uri': (basestring, False),
145 'Version': (basestring, True),
146 }
147
148
149 class Image(AWSObject):
150 resource_type = "AWS::ImageBuilder::Image"
151
152 props = {
153 'DistributionConfigurationArn': (basestring, False),
154 'ImageRecipeArn': (basestring, True),
155 'ImageTestsConfiguration': (ImageTestsConfiguration, True),
156 'InfrastructureConfigurationArn': (basestring, True),
157 'Tags': (json_checker, False),
158 }
159
[end of troposphere/imagebuilder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/imagebuilder.py b/troposphere/imagebuilder.py
--- a/troposphere/imagebuilder.py
+++ b/troposphere/imagebuilder.py
@@ -4,8 +4,8 @@
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
-from .validators import (integer, boolean, json_checker,
- component_platforms, imagepipeline_status,
+from .validators import (integer, boolean, component_platforms,
+ imagepipeline_status,
schedule_pipelineexecutionstartcondition,
ebsinstanceblockdevicespecification_volume_type)
@@ -36,7 +36,7 @@
'SecurityGroupIds': ([basestring], False),
'SnsTopicArn': (basestring, False),
'SubnetId': (basestring, False),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
'TerminateInstanceOnFailure': (boolean, False)
}
@@ -57,7 +57,7 @@
props = {
'DeviceName': (basestring, False),
'Ebs': (EbsInstanceBlockDeviceSpecification, False),
- 'NoDevice': (boolean, False),
+ 'NoDevice': (basestring, False),
'VirtualName': (basestring, False),
}
@@ -77,7 +77,7 @@
'Description': (basestring, False),
'Name': (basestring, True),
'ParentImage': (basestring, True),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
'Version': (basestring, True)
}
@@ -108,13 +108,13 @@
'Name': (basestring, True),
'Schedule': (Schedule, False),
'Status': (imagepipeline_status, False),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
}
class Distribution(AWSProperty):
props = {
- 'AmiDistributionConfiguration': (json_checker, False),
+ 'AmiDistributionConfiguration': (dict, False),
'LicenseConfigurationArns': ([basestring], False),
'Region': (basestring, False),
}
@@ -127,7 +127,7 @@
'Description': (basestring, False),
'Distributions': ([Distribution], True),
'Name': (basestring, True),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
}
@@ -136,11 +136,12 @@
props = {
'ChangeDescription': (basestring, False),
+ 'Data': (basestring, False),
'Description': (basestring, False),
'KmsKeyId': (basestring, False),
'Name': (basestring, True),
'Platform': (component_platforms, True),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
'Uri': (basestring, False),
'Version': (basestring, True),
}
@@ -154,5 +155,5 @@
'ImageRecipeArn': (basestring, True),
'ImageTestsConfiguration': (ImageTestsConfiguration, True),
'InfrastructureConfigurationArn': (basestring, True),
- 'Tags': (json_checker, False),
+ 'Tags': (dict, False),
}
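With the properties switched to `dict`, a plain dictionary round-trips into the template as an object. A usage sketch under that assumption (the logical name, profile name and tag values below are made up):

```python
from troposphere import Template
from troposphere.imagebuilder import InfrastructureConfiguration

template = Template()
template.add_resource(
    InfrastructureConfiguration(
        "ImageBuilderInfra",
        Name="example-infra",
        InstanceProfileName="example-instance-profile",
        Tags={"Environment": "prod"},  # plain dict, emitted as a JSON object
    )
)
print(template.to_json())
```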
| {"golden_diff": "diff --git a/troposphere/imagebuilder.py b/troposphere/imagebuilder.py\n--- a/troposphere/imagebuilder.py\n+++ b/troposphere/imagebuilder.py\n@@ -4,8 +4,8 @@\n # See LICENSE file for full license.\n \n from . import AWSObject, AWSProperty\n-from .validators import (integer, boolean, json_checker,\n- component_platforms, imagepipeline_status,\n+from .validators import (integer, boolean, component_platforms,\n+ imagepipeline_status,\n schedule_pipelineexecutionstartcondition,\n ebsinstanceblockdevicespecification_volume_type)\n \n@@ -36,7 +36,7 @@\n 'SecurityGroupIds': ([basestring], False),\n 'SnsTopicArn': (basestring, False),\n 'SubnetId': (basestring, False),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'TerminateInstanceOnFailure': (boolean, False)\n }\n \n@@ -57,7 +57,7 @@\n props = {\n 'DeviceName': (basestring, False),\n 'Ebs': (EbsInstanceBlockDeviceSpecification, False),\n- 'NoDevice': (boolean, False),\n+ 'NoDevice': (basestring, False),\n 'VirtualName': (basestring, False),\n }\n \n@@ -77,7 +77,7 @@\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'ParentImage': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'Version': (basestring, True)\n }\n \n@@ -108,13 +108,13 @@\n 'Name': (basestring, True),\n 'Schedule': (Schedule, False),\n 'Status': (imagepipeline_status, False),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n \n \n class Distribution(AWSProperty):\n props = {\n- 'AmiDistributionConfiguration': (json_checker, False),\n+ 'AmiDistributionConfiguration': (dict, False),\n 'LicenseConfigurationArns': ([basestring], False),\n 'Region': (basestring, False),\n }\n@@ -127,7 +127,7 @@\n 'Description': (basestring, False),\n 'Distributions': ([Distribution], True),\n 'Name': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n \n \n@@ -136,11 +136,12 @@\n \n props = {\n 'ChangeDescription': (basestring, False),\n+ 'Data': (basestring, False),\n 'Description': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'Name': (basestring, True),\n 'Platform': (component_platforms, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n 'Uri': (basestring, False),\n 'Version': (basestring, True),\n }\n@@ -154,5 +155,5 @@\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, True),\n 'InfrastructureConfigurationArn': (basestring, True),\n- 'Tags': (json_checker, False),\n+ 'Tags': (dict, False),\n }\n", "issue": "AWS::ImageBuilder::* some wrong data types and attribut missing\nIn imagebuilder.py (2.6.1 release).\r\n\r\n* In AWS::ImageBuilder::*\r\n\"Tags\" are \"json_checker\" but should be dict\r\nWhen encoded in the structure it becomes a string (validator.py, ligne 258,` json.dumps(prop)`) which is creating an issue with CloudFormation that expect a struct like : `\"Tags\" : {Key : Value, ...}`\r\n* AWS::ImageBuilder::DistributionConfiguration::Distribution\r\n\"AmiDistributionConfiguration\" is \"json_checker\" but should be dict.\r\nFor the same as above \"Tags\" \r\n* In AWS::ImageBuilder::Component\r\n\"Data\" is missing. And should be \"json_checker\" in that case.\r\n* In AWS::ImageBuilder::ImageRecipe::InstanceBlockDeviceMapping\r\n\"NoDevice\" is boolean but should be a string\r\n\n", "before_files": [{"content": "# Copyright (c) 2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty\nfrom .validators import (integer, boolean, json_checker,\n component_platforms, imagepipeline_status,\n schedule_pipelineexecutionstartcondition,\n ebsinstanceblockdevicespecification_volume_type)\n\n\nclass S3Logs(AWSProperty):\n props = {\n \"S3BucketName\": (basestring, False),\n \"S3KeyPrefix\": (basestring, False),\n }\n\n\nclass Logging(AWSProperty):\n props = {\n 'S3Logs': (S3Logs, False),\n }\n\n\nclass InfrastructureConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::InfrastructureConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'InstanceProfileName': (basestring, True),\n 'InstanceTypes': ([basestring], False),\n 'KeyPair': (basestring, False),\n 'Logging': (Logging, False),\n 'Name': (basestring, True),\n 'SecurityGroupIds': ([basestring], False),\n 'SnsTopicArn': (basestring, False),\n 'SubnetId': (basestring, False),\n 'Tags': (json_checker, False),\n 'TerminateInstanceOnFailure': (boolean, False)\n }\n\n\nclass EbsInstanceBlockDeviceSpecification(AWSProperty):\n props = {\n 'DeleteOnTermination': (boolean, False),\n 'Encrypted': (boolean, False),\n 'Iops': (integer, False),\n 'KmsKeyId': (basestring, False),\n 'SnapshotId': (basestring, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (ebsinstanceblockdevicespecification_volume_type, False),\n }\n\n\nclass InstanceBlockDeviceMapping(AWSProperty):\n props = {\n 'DeviceName': (basestring, False),\n 'Ebs': (EbsInstanceBlockDeviceSpecification, False),\n 'NoDevice': (boolean, False),\n 'VirtualName': (basestring, False),\n }\n\n\nclass ComponentConfiguration(AWSProperty):\n props = {\n 'ComponentArn': (basestring, False),\n }\n\n\nclass ImageRecipe(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImageRecipe\"\n\n props = {\n 'BlockDeviceMappings': ([InstanceBlockDeviceMapping], False),\n 'Components': ([ComponentConfiguration], True),\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'ParentImage': (basestring, True),\n 'Tags': (json_checker, False),\n 'Version': (basestring, True)\n }\n\n\nclass ImageTestsConfiguration(AWSProperty):\n props = {\n 'ImageTestsEnabled': (boolean, False),\n 'TimeoutMinutes': (integer, False),\n }\n\n\nclass Schedule(AWSProperty):\n props = {\n 'PipelineExecutionStartCondition': (schedule_pipelineexecutionstartcondition, False), # NOQA\n 'ScheduleExpression': (basestring, False),\n }\n\n\nclass ImagePipeline(AWSObject):\n resource_type = \"AWS::ImageBuilder::ImagePipeline\"\n\n props = {\n 'Description': (basestring, False),\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, False),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Name': (basestring, True),\n 'Schedule': (Schedule, False),\n 'Status': (imagepipeline_status, False),\n 'Tags': (json_checker, False),\n }\n\n\nclass Distribution(AWSProperty):\n props = {\n 'AmiDistributionConfiguration': (json_checker, False),\n 'LicenseConfigurationArns': ([basestring], False),\n 'Region': (basestring, False),\n }\n\n\nclass DistributionConfiguration(AWSObject):\n resource_type = \"AWS::ImageBuilder::DistributionConfiguration\"\n\n props = {\n 'Description': (basestring, False),\n 'Distributions': ([Distribution], True),\n 'Name': (basestring, True),\n 'Tags': (json_checker, False),\n }\n\n\nclass Component(AWSObject):\n resource_type = \"AWS::ImageBuilder::Component\"\n\n props = {\n 'ChangeDescription': (basestring, False),\n 'Description': 
(basestring, False),\n 'KmsKeyId': (basestring, False),\n 'Name': (basestring, True),\n 'Platform': (component_platforms, True),\n 'Tags': (json_checker, False),\n 'Uri': (basestring, False),\n 'Version': (basestring, True),\n }\n\n\nclass Image(AWSObject):\n resource_type = \"AWS::ImageBuilder::Image\"\n\n props = {\n 'DistributionConfigurationArn': (basestring, False),\n 'ImageRecipeArn': (basestring, True),\n 'ImageTestsConfiguration': (ImageTestsConfiguration, True),\n 'InfrastructureConfigurationArn': (basestring, True),\n 'Tags': (json_checker, False),\n }\n", "path": "troposphere/imagebuilder.py"}]} | 2,217 | 769 |
gh_patches_debug_14052 | rasdani/github-patches | git_diff | openai__gym-1149 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Why is there a restriction on shape in MultiDiscrete?
https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10
Why is this imposed?
Say one needs a high-dimensional space, like a (3, 3, 3) grid with discretised values for each element in the grid.
The restriction can easily be circumvented by using np.random.random_sample and passing the shape.
Is there some specific reason for doing this?
Also note that the example provided in dict_space doesn't work currently.
https://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22
</issue>
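The workaround mentioned above amounts to sampling with the target shape directly; a minimal sketch of what that looks like outside the `Space` class (grid shape and counts chosen arbitrarily):

```python
import numpy as np

nvec = np.full((3, 3, 3), 4, dtype=np.int32)   # 3x3x3 grid, 4 choices per cell
sample = (np.random.random_sample(nvec.shape) * nvec).astype(np.int32)

assert sample.shape == (3, 3, 3)
assert ((0 <= sample) & (sample < nvec)).all()
```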
<code>
[start of gym/spaces/multi_discrete.py]
1 import gym
2 import numpy as np
3
4 class MultiDiscrete(gym.Space):
5 def __init__(self, nvec):
6 """
7 nvec: vector of counts of each categorical variable
8 """
9 self.nvec = np.asarray(nvec, dtype=np.int32)
10 assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'
11 gym.Space.__init__(self, (self.nvec.size,), np.int8)
12 def sample(self):
13 return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)
14 def contains(self, x):
15 return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
16
17 __contains__ = contains
18
19 def to_jsonable(self, sample_n):
20 return [sample.tolist() for sample in sample_n]
21 def from_jsonable(self, sample_n):
22 return np.array(sample_n)
23
[end of gym/spaces/multi_discrete.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -7,10 +7,9 @@
nvec: vector of counts of each categorical variable
"""
self.nvec = np.asarray(nvec, dtype=np.int32)
- assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'
- gym.Space.__init__(self, (self.nvec.size,), np.int8)
+ gym.Space.__init__(self, (self.nvec.shape,), np.int8)
def sample(self):
- return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)
+ return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
def contains(self, x):
return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'
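Assuming the patch above is applied, the space can then be constructed with an arbitrarily shaped `nvec`; a hedged usage sketch, not taken from the repository's tests:

```python
import numpy as np
from gym.spaces import MultiDiscrete

space = MultiDiscrete(np.full((3, 3), 5, dtype=np.int32))
sample = space.sample()            # shape (3, 3), each entry in [0, 5)
assert space.contains(sample)
```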
| {"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -7,10 +7,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n- assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n- gym.Space.__init__(self, (self.nvec.size,), np.int8)\n+ gym.Space.__init__(self, (self.nvec.shape,), np.int8)\n def sample(self):\n- return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n+ return (gym.spaces.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n", "issue": "Why is there a restiction on shape in multi discrete?\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/multi_discrete.py#L10\r\n\r\nWhy is this imposed?\r\nSay one may need a high dimensional space like a (3,3,3) grid with discretised values for each element in the grid. \r\nIt can be very easily circumvented by using np.random.random_sample and passing the shape\r\nIs there some specific reason for doing this?\r\n\r\nAlso note this example provided in dict_space doesn't work currently.\r\nhttps://github.com/openai/gym/blob/422c9c7bb3c3c5a756c3b12dfe99733bfbfe3920/gym/spaces/dict_space.py#L22\r\n\r\n\n", "before_files": [{"content": "import gym\nimport numpy as np\n\nclass MultiDiscrete(gym.Space):\n def __init__(self, nvec):\n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n self.nvec = np.asarray(nvec, dtype=np.int32)\n assert self.nvec.ndim == 1, 'nvec should be a 1d array (or list) of ints'\n gym.Space.__init__(self, (self.nvec.size,), np.int8)\n def sample(self):\n return (gym.spaces.np_random.rand(self.nvec.size) * self.nvec).astype(self.dtype)\n def contains(self, x):\n return (0 <= x).all() and (x < self.nvec).all() and x.dtype.kind in 'ui'\n \n __contains__ = contains\n \n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n", "path": "gym/spaces/multi_discrete.py"}]} | 1,003 | 247 |
gh_patches_debug_22062 | rasdani/github-patches | git_diff | getsentry__sentry-24250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sentry django send_fake_data errors out
## Important Details
How are you running Sentry?
* [ ] On-Premise docker [Version xyz]
* [ ] Saas (sentry.io)
* [x] Other [briefly describe your environment]
Development environment on macOS while working on sentry-plugins, with `sentry devserver`
## Description
When running `sentry django send_fake_data` a stacktrace is raised:
```
Traceback (most recent call last):
File "MYPYTHONPATH/bin/sentry", line 11, in <module>
load_entry_point('sentry', 'console_scripts', 'sentry')()
File "MYPYTHONPATH/src/sentry/src/sentry/runner/__init__.py", line 161, in main
cli(prog_name=get_prog(), obj={}, max_content_width=100)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "MYPYTHONPATH/src/sentry/src/sentry/runner/decorators.py", line 36, in inner
return ctx.invoke(f, *args, **kwargs)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "MYPYTHONPATH/src/sentry/src/sentry/runner/commands/django.py", line 25, in django
execute_from_command_line(argv=[ctx.command_path] + list(management_args))
File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py", line 399, in execute_from_command_line
utility.execute()
File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py", line 392, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py", line 242, in run_from_argv
self.execute(*args, **options.__dict__)
File "MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py", line 285, in execute
output = self.handle(*args, **options)
File "MYPYTHONPATH/src/sentry/src/sentry/management/commands/send_fake_data.py", line 75, in handle
from raven.contrib.django.models import client
ImportError: No module named raven.contrib.django.models
```
## Steps to Reproduce
See above.
### What you expected to happen
Some fake data should magically appear!
### Possible Solution
Appears to be coming from this import:
https://github.com/getsentry/sentry/blob/317c65fbcd0f0b31a4e97c415bcf35256900a286/src/sentry/management/commands/send_fake_data.py#L75
Manually running `pip install raven` appears to solve it - no traceback appears, so I guess having raven installed solves the immediate issue.
However, I think no events are actually being sent anyway, as suggested by this log:
```
[INFO] raven.contrib.django.client.DjangoClient: Raven is not configured (logging is disabled). Please see the documentation for more information. (message=u'Raven is not configured (logging is disabled). Please see the documentation for more information.')
Preparing to send events. Ctrl-C to exit.
```
I saw there's a push towards the unified Python SDK - should this fix be done with that approach instead?
</issue>
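For reference, attaching the same metadata in the unified-SDK style looks roughly like this. This is a sketch with `sentry_sdk`, not the command's actual implementation; the DSN and email are placeholders:

```python
import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

email = "foo@example.com"
with sentry_sdk.configure_scope() as scope:
    scope.user = {"id": email, "email": email}
    scope.set_tag("site", "web")

try:
    raise ValueError("baz is not a valid choice")
except Exception:
    sentry_sdk.capture_exception()
```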
<code>
[start of src/sentry/management/commands/send_fake_data.py]
1 import datetime
2 import itertools
3 import random
4 import time
5
6 from django.core.management.base import BaseCommand, CommandError
7
8
9 def funcs():
10 exceptions = itertools.cycle(
11 [
12 SyntaxError("foo must come before bar"),
13 ValueError("baz is not a valid choice"),
14 TypeError("NoneType cannot be coerced to bar"),
15 NotImplementedError("This feature is not implemented"),
16 ZeroDivisionError("Your math doesn't work"),
17 Exception("An unknown exception"),
18 KeyError("index does not exist"),
19 ]
20 )
21 loggers = itertools.cycle(["root", "foo", "foo.bar"])
22 emails = itertools.cycle(["[email protected]", "[email protected]", "[email protected]"])
23 timestamp_max = int(datetime.timedelta(days=1).total_seconds())
24
25 def exception(client):
26 timestamp = datetime.datetime.utcnow() - datetime.timedelta(
27 seconds=random.randint(0, timestamp_max)
28 )
29 try:
30 raise next(exceptions)
31 except Exception:
32 email = next(emails)
33 return client.captureException(
34 data={
35 "logger": next(loggers),
36 "site": "web",
37 "user": {"id": email, "email": email},
38 },
39 date=timestamp,
40 )
41
42 return [exception]
43
44
45 class Command(BaseCommand):
46 help = "Sends fake data to the internal Sentry project"
47
48 def add_arguments(self, parser):
49 parser.add_argument(
50 "--project", dest="project", help="project ID or organization-slug/project-slug"
51 )
52 parser.add_argument("--num", dest="num_events", type=int),
53
54 def handle(self, **options):
55 from django.conf import settings
56 from raven.contrib.django.models import client
57 from sentry.models import Project
58
59 if not options["project"]:
60 project = Project.objects.get(id=settings.SENTRY_PROJECT)
61 else:
62 if options["project"].isdigit():
63 project = Project.objects.get(id=options["project"])
64 elif "/" in options["project"]:
65 o_slug, p_slug = options["project"].split("/", 1)
66 project = Project.objects.get(slug=p_slug, organization__slug=o_slug)
67 else:
68 raise CommandError(
69 "Project must be specified as organization-slug/project-slug or a project id"
70 )
71
72 client.project = project.id
73
74 self.stdout.write("Preparing to send events. Ctrl-C to exit.")
75
76 time.sleep(2)
77
78 functions = funcs()
79
80 if options["num_events"]:
81 max_events = options["num_events"]
82 else:
83 max_events = -1
84
85 s = time.time()
86 r = 0
87 try:
88 while True:
89 if r == max_events:
90 break
91 if options["verbosity"] > 1:
92 self.stdout.write("Sending event..\n")
93 random.choice(functions)(client)
94 r += 1
95 except KeyboardInterrupt:
96 pass
97 finally:
98 total_time = time.time() - s
99 self.stdout.write("%d requests serviced in %.3fs\n" % (r, total_time))
100 if r:
101 avg = total_time / r
102 ravg = 1 / avg
103 else:
104 avg = ravg = 0
105 self.stdout.write("avg of %.3fs/req, %d req/s\n" % (avg, ravg))
106
[end of src/sentry/management/commands/send_fake_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/management/commands/send_fake_data.py b/src/sentry/management/commands/send_fake_data.py
--- a/src/sentry/management/commands/send_fake_data.py
+++ b/src/sentry/management/commands/send_fake_data.py
@@ -28,16 +28,14 @@
)
try:
raise next(exceptions)
- except Exception:
+ except Exception as exc:
email = next(emails)
- return client.captureException(
- data={
- "logger": next(loggers),
- "site": "web",
- "user": {"id": email, "email": email},
- },
- date=timestamp,
- )
+ with client.configure_scope() as scope:
+ scope.user = {"id": email, "email": email}
+ scope.logger = next(loggers)
+ scope.site = "web"
+ scope.date = timestamp
+ return client.captureException(exc)
return [exception]
@@ -53,7 +51,7 @@
def handle(self, **options):
from django.conf import settings
- from raven.contrib.django.models import client
+ from sentry.app import client
from sentry.models import Project
if not options["project"]:
| {"golden_diff": "diff --git a/src/sentry/management/commands/send_fake_data.py b/src/sentry/management/commands/send_fake_data.py\n--- a/src/sentry/management/commands/send_fake_data.py\n+++ b/src/sentry/management/commands/send_fake_data.py\n@@ -28,16 +28,14 @@\n )\n try:\n raise next(exceptions)\n- except Exception:\n+ except Exception as exc:\n email = next(emails)\n- return client.captureException(\n- data={\n- \"logger\": next(loggers),\n- \"site\": \"web\",\n- \"user\": {\"id\": email, \"email\": email},\n- },\n- date=timestamp,\n- )\n+ with client.configure_scope() as scope:\n+ scope.user = {\"id\": email, \"email\": email}\n+ scope.logger = next(loggers)\n+ scope.site = \"web\"\n+ scope.date = timestamp\n+ return client.captureException(exc)\n \n return [exception]\n \n@@ -53,7 +51,7 @@\n \n def handle(self, **options):\n from django.conf import settings\n- from raven.contrib.django.models import client\n+ from sentry.app import client\n from sentry.models import Project\n \n if not options[\"project\"]:\n", "issue": "sentry django send_fake_data errors out\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [ ] Saas (sentry.io)\r\n* [x] Other [briefly describe your environment]\r\nDevelopment environment on macOS while working on sentry-plugins, with `sentry devserver`\r\n\r\n## Description\r\n\r\nWhen running `sentry django send_fake_data` a stacktrace is raised:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"MYPYTHONPATH/bin/sentry\", line 11, in <module>\r\n load_entry_point('sentry', 'console_scripts', 'sentry')()\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/__init__.py\", line 161, in main\r\n cli(prog_name=get_prog(), obj={}, max_content_width=100)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/decorators.py\", line 36, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/click/decorators.py\", line 17, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/runner/commands/django.py\", line 25, in django\r\n execute_from_command_line(argv=[ctx.command_path] + list(management_args))\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py\", line 399, in execute_from_command_line\r\n utility.execute()\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/__init__.py\", line 392, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File 
\"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py\", line 242, in run_from_argv\r\n self.execute(*args, **options.__dict__)\r\n File \"MYPYTHONPATH/lib/python2.7/site-packages/django/core/management/base.py\", line 285, in execute\r\n output = self.handle(*args, **options)\r\n File \"MYPYTHONPATH/src/sentry/src/sentry/management/commands/send_fake_data.py\", line 75, in handle\r\n from raven.contrib.django.models import client\r\nImportError: No module named raven.contrib.django.models\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\nSee above.\r\n\r\n### What you expected to happen\r\n\r\nSome fake data should magically appear!\r\n\r\n### Possible Solution\r\n\r\nAppears to be coming from this import:\r\nhttps://github.com/getsentry/sentry/blob/317c65fbcd0f0b31a4e97c415bcf35256900a286/src/sentry/management/commands/send_fake_data.py#L75\r\n\r\nManually running `pip install raven` appears to solve it - no traceback appears, so I guess having raven installed, that solves the immediate issue.\r\n\r\nHowever, I think that there's no events being sent anyhow - as perceived by this log:\r\n```\r\n[INFO] raven.contrib.django.client.DjangoClient: Raven is not configured (logging is disabled). Please see the documentation for more information. (message=u'Raven is not configured (logging is disabled). Please see the documentation for more information.')\r\nPreparing to send events. Ctrl-C to exit.\r\n```\r\n\r\nI saw there's a push to go towards the unified python sdk - should this fix be overall done with that approach?\r\n\n", "before_files": [{"content": "import datetime\nimport itertools\nimport random\nimport time\n\nfrom django.core.management.base import BaseCommand, CommandError\n\n\ndef funcs():\n exceptions = itertools.cycle(\n [\n SyntaxError(\"foo must come before bar\"),\n ValueError(\"baz is not a valid choice\"),\n TypeError(\"NoneType cannot be coerced to bar\"),\n NotImplementedError(\"This feature is not implemented\"),\n ZeroDivisionError(\"Your math doesn't work\"),\n Exception(\"An unknown exception\"),\n KeyError(\"index does not exist\"),\n ]\n )\n loggers = itertools.cycle([\"root\", \"foo\", \"foo.bar\"])\n emails = itertools.cycle([\"[email protected]\", \"[email protected]\", \"[email protected]\"])\n timestamp_max = int(datetime.timedelta(days=1).total_seconds())\n\n def exception(client):\n timestamp = datetime.datetime.utcnow() - datetime.timedelta(\n seconds=random.randint(0, timestamp_max)\n )\n try:\n raise next(exceptions)\n except Exception:\n email = next(emails)\n return client.captureException(\n data={\n \"logger\": next(loggers),\n \"site\": \"web\",\n \"user\": {\"id\": email, \"email\": email},\n },\n date=timestamp,\n )\n\n return [exception]\n\n\nclass Command(BaseCommand):\n help = \"Sends fake data to the internal Sentry project\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--project\", dest=\"project\", help=\"project ID or organization-slug/project-slug\"\n )\n parser.add_argument(\"--num\", dest=\"num_events\", type=int),\n\n def handle(self, **options):\n from django.conf import settings\n from raven.contrib.django.models import client\n from sentry.models import Project\n\n if not options[\"project\"]:\n project = Project.objects.get(id=settings.SENTRY_PROJECT)\n else:\n if options[\"project\"].isdigit():\n project = Project.objects.get(id=options[\"project\"])\n elif \"/\" in options[\"project\"]:\n o_slug, p_slug = options[\"project\"].split(\"/\", 1)\n project = Project.objects.get(slug=p_slug, 
organization__slug=o_slug)\n else:\n raise CommandError(\n \"Project must be specified as organization-slug/project-slug or a project id\"\n )\n\n client.project = project.id\n\n self.stdout.write(\"Preparing to send events. Ctrl-C to exit.\")\n\n time.sleep(2)\n\n functions = funcs()\n\n if options[\"num_events\"]:\n max_events = options[\"num_events\"]\n else:\n max_events = -1\n\n s = time.time()\n r = 0\n try:\n while True:\n if r == max_events:\n break\n if options[\"verbosity\"] > 1:\n self.stdout.write(\"Sending event..\\n\")\n random.choice(functions)(client)\n r += 1\n except KeyboardInterrupt:\n pass\n finally:\n total_time = time.time() - s\n self.stdout.write(\"%d requests serviced in %.3fs\\n\" % (r, total_time))\n if r:\n avg = total_time / r\n ravg = 1 / avg\n else:\n avg = ravg = 0\n self.stdout.write(\"avg of %.3fs/req, %d req/s\\n\" % (avg, ravg))\n", "path": "src/sentry/management/commands/send_fake_data.py"}]} | 2,538 | 283 |
gh_patches_debug_28494 | rasdani/github-patches | git_diff | goauthentik__authentik-8147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Federation & Social Logins // OAuth - Azure AD // Unable to save settings
**Describe the bug**
See https://github.com/goauthentik/authentik/issues/7744
Problem still exists with release 2023.10.5
**To Reproduce**
Steps to reproduce the behavior:
See https://github.com/goauthentik/authentik/issues/7744
Neither on the initial save nor on later updates are 'Authorization URL', 'Access token URL' and 'Profile URL' changed.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Logs**
Seem fine
server:
2023-12-22 11:30:41 {"action": "model_updated", "auth_via": "session", "client_ip": "172.18.0.1", "context": {"http_request": {"args": {}, "method": "PATCH", "path": "/api/v3/sources/oauth/azure-ad/", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0"}, "model": {"app": "authentik_sources_oauth", "model_name": "oauthsource", "name": "azure ad", "pk": "bda8c9beeac54e5495c8270838e7a797"}}, "event": "Created Event", "host": "localhost:8044", "level": "info", "logger": "authentik.events.models", "pid": 40, "request_id": "ff436debac13400e94274999b5f5d06d", "timestamp": "2023-12-22T10:30:41.555035", "user": {"email": "[email protected]", "pk": 3, "username": "Admin"}}
2023-12-22 11:30:41 {"auth_via": "session", "event": "Task published", "host": "localhost:8044", "level": "info", "logger": "authentik.root.celery", "pid": 40, "request_id": "ff436debac13400e94274999b5f5d06d", "task_id": "4ebe0d05335f4700b8c0871d6f10235c", "task_name": "authentik.events.tasks.event_notification_handler", "timestamp": "2023-12-22T10:30:41.568890"}
2023-12-22 11:30:41 {"auth_via": "session", "event": "/api/v3/sources/oauth/azure-ad/", "host": "localhost:8044", "level": "info", "logger": "authentik.asgi", "method": "PATCH", "pid": 40, "remote": "172.18.0.1", "request_id": "ff436debac13400e94274999b5f5d06d", "runtime": 396, "scheme": "http", "status": 200, "timestamp": "2023-12-22T10:30:41.572086", "user": "Admin", "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0"}
**Version and Deployment (please complete the following information):**
- authentik version: 2021.10.5
- Deployment: docker-compose, helm
**Additional context**
</issue>
<code>
[start of authentik/sources/oauth/api/source.py]
1 """OAuth Source Serializer"""
2 from django.urls.base import reverse_lazy
3 from django_filters.filters import BooleanFilter
4 from django_filters.filterset import FilterSet
5 from drf_spectacular.types import OpenApiTypes
6 from drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field
7 from requests import RequestException
8 from rest_framework.decorators import action
9 from rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField
10 from rest_framework.request import Request
11 from rest_framework.response import Response
12 from rest_framework.serializers import ValidationError
13 from rest_framework.viewsets import ModelViewSet
14
15 from authentik.core.api.sources import SourceSerializer
16 from authentik.core.api.used_by import UsedByMixin
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.lib.utils.http import get_http_session
19 from authentik.sources.oauth.models import OAuthSource
20 from authentik.sources.oauth.types.registry import SourceType, registry
21
22
23 class SourceTypeSerializer(PassiveSerializer):
24 """Serializer for SourceType"""
25
26 name = CharField(required=True)
27 verbose_name = CharField(required=True)
28 urls_customizable = BooleanField()
29 request_token_url = CharField(read_only=True, allow_null=True)
30 authorization_url = CharField(read_only=True, allow_null=True)
31 access_token_url = CharField(read_only=True, allow_null=True)
32 profile_url = CharField(read_only=True, allow_null=True)
33 oidc_well_known_url = CharField(read_only=True, allow_null=True)
34 oidc_jwks_url = CharField(read_only=True, allow_null=True)
35
36
37 class OAuthSourceSerializer(SourceSerializer):
38 """OAuth Source Serializer"""
39
40 provider_type = ChoiceField(choices=registry.get_name_tuple())
41 callback_url = SerializerMethodField()
42 type = SerializerMethodField()
43
44 def get_callback_url(self, instance: OAuthSource) -> str:
45 """Get OAuth Callback URL"""
46 relative_url = reverse_lazy(
47 "authentik_sources_oauth:oauth-client-callback",
48 kwargs={"source_slug": instance.slug},
49 )
50 if "request" not in self.context:
51 return relative_url
52 return self.context["request"].build_absolute_uri(relative_url)
53
54 @extend_schema_field(SourceTypeSerializer)
55 def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:
56 """Get source's type configuration"""
57 return SourceTypeSerializer(instance.source_type).data
58
59 def validate(self, attrs: dict) -> dict:
60 session = get_http_session()
61 source_type = registry.find_type(attrs["provider_type"])
62
63 well_known = attrs.get("oidc_well_known_url") or source_type.oidc_well_known_url
64 inferred_oidc_jwks_url = None
65
66 if well_known and well_known != "":
67 try:
68 well_known_config = session.get(well_known)
69 well_known_config.raise_for_status()
70 except RequestException as exc:
71 text = exc.response.text if exc.response else str(exc)
72 raise ValidationError({"oidc_well_known_url": text})
73 config = well_known_config.json()
74 if "issuer" not in config:
75 raise ValidationError({"oidc_well_known_url": "Invalid well-known configuration"})
76 attrs["authorization_url"] = config.get("authorization_endpoint", "")
77 attrs["access_token_url"] = config.get("token_endpoint", "")
78 attrs["profile_url"] = config.get("userinfo_endpoint", "")
79 inferred_oidc_jwks_url = config.get("jwks_uri", "")
80
81 # Prefer user-entered URL to inferred URL to default URL
82 jwks_url = attrs.get("oidc_jwks_url") or inferred_oidc_jwks_url or source_type.oidc_jwks_url
83 if jwks_url and jwks_url != "":
84 attrs["oidc_jwks_url"] = jwks_url
85 try:
86 jwks_config = session.get(jwks_url)
87 jwks_config.raise_for_status()
88 except RequestException as exc:
89 text = exc.response.text if exc.response else str(exc)
90 raise ValidationError({"oidc_jwks_url": text})
91 config = jwks_config.json()
92 attrs["oidc_jwks"] = config
93
94 provider_type = registry.find_type(attrs.get("provider_type", ""))
95 for url in [
96 "authorization_url",
97 "access_token_url",
98 "profile_url",
99 ]:
100 if getattr(provider_type, url, None) is None:
101 if url not in attrs:
102 raise ValidationError(
103 f"{url} is required for provider {provider_type.verbose_name}"
104 )
105 return attrs
106
107 class Meta:
108 model = OAuthSource
109 fields = SourceSerializer.Meta.fields + [
110 "provider_type",
111 "request_token_url",
112 "authorization_url",
113 "access_token_url",
114 "profile_url",
115 "consumer_key",
116 "consumer_secret",
117 "callback_url",
118 "additional_scopes",
119 "type",
120 "oidc_well_known_url",
121 "oidc_jwks_url",
122 "oidc_jwks",
123 ]
124 extra_kwargs = {"consumer_secret": {"write_only": True}}
125
126
127 class OAuthSourceFilter(FilterSet):
128 """OAuth Source filter set"""
129
130 has_jwks = BooleanFilter(label="Only return sources with JWKS data", method="filter_has_jwks")
131
132 def filter_has_jwks(self, queryset, name, value): # pragma: no cover
133 """Only return sources with JWKS data"""
134 return queryset.exclude(oidc_jwks__iexact="{}")
135
136 class Meta:
137 model = OAuthSource
138 fields = [
139 "name",
140 "slug",
141 "enabled",
142 "authentication_flow",
143 "enrollment_flow",
144 "policy_engine_mode",
145 "user_matching_mode",
146 "provider_type",
147 "request_token_url",
148 "authorization_url",
149 "access_token_url",
150 "profile_url",
151 "consumer_key",
152 "additional_scopes",
153 ]
154
155
156 class OAuthSourceViewSet(UsedByMixin, ModelViewSet):
157 """Source Viewset"""
158
159 queryset = OAuthSource.objects.all()
160 serializer_class = OAuthSourceSerializer
161 lookup_field = "slug"
162 filterset_class = OAuthSourceFilter
163 search_fields = ["name", "slug"]
164 ordering = ["name"]
165
166 @extend_schema(
167 responses={200: SourceTypeSerializer(many=True)},
168 parameters=[
169 OpenApiParameter(
170 name="name",
171 location=OpenApiParameter.QUERY,
172 type=OpenApiTypes.STR,
173 )
174 ],
175 )
176 @action(detail=False, pagination_class=None, filter_backends=[])
177 def source_types(self, request: Request) -> Response:
178 """Get all creatable source types. If ?name is set, only returns the type for <name>.
179 If <name> isn't found, returns the default type."""
180 data = []
181 if "name" in request.query_params:
182 source_type = registry.find_type(request.query_params.get("name"))
183 if source_type.__class__ != SourceType:
184 data.append(SourceTypeSerializer(source_type).data)
185 else:
186 for source_type in registry.get():
187 data.append(SourceTypeSerializer(source_type).data)
188 return Response(data)
189
[end of authentik/sources/oauth/api/source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py
--- a/authentik/sources/oauth/api/source.py
+++ b/authentik/sources/oauth/api/source.py
@@ -56,6 +56,7 @@
"""Get source's type configuration"""
return SourceTypeSerializer(instance.source_type).data
+ # pylint: disable=too-many-locals
def validate(self, attrs: dict) -> dict:
session = get_http_session()
source_type = registry.find_type(attrs["provider_type"])
@@ -73,9 +74,17 @@
config = well_known_config.json()
if "issuer" not in config:
raise ValidationError({"oidc_well_known_url": "Invalid well-known configuration"})
- attrs["authorization_url"] = config.get("authorization_endpoint", "")
- attrs["access_token_url"] = config.get("token_endpoint", "")
- attrs["profile_url"] = config.get("userinfo_endpoint", "")
+ field_map = {
+ # authentik field to oidc field
+ "authorization_url": "authorization_endpoint",
+ "access_token_url": "token_endpoint",
+ "profile_url": "userinfo_endpoint",
+ }
+ for ak_key, oidc_key in field_map.items():
+ # Don't overwrite user-set values
+ if ak_key in attrs and attrs[ak_key]:
+ continue
+ attrs[ak_key] = config.get(oidc_key, "")
inferred_oidc_jwks_url = config.get("jwks_uri", "")
# Prefer user-entered URL to inferred URL to default URL
| {"golden_diff": "diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py\n--- a/authentik/sources/oauth/api/source.py\n+++ b/authentik/sources/oauth/api/source.py\n@@ -56,6 +56,7 @@\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n \n+ # pylint: disable=too-many-locals\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n@@ -73,9 +74,17 @@\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n- attrs[\"authorization_url\"] = config.get(\"authorization_endpoint\", \"\")\n- attrs[\"access_token_url\"] = config.get(\"token_endpoint\", \"\")\n- attrs[\"profile_url\"] = config.get(\"userinfo_endpoint\", \"\")\n+ field_map = {\n+ # authentik field to oidc field\n+ \"authorization_url\": \"authorization_endpoint\",\n+ \"access_token_url\": \"token_endpoint\",\n+ \"profile_url\": \"userinfo_endpoint\",\n+ }\n+ for ak_key, oidc_key in field_map.items():\n+ # Don't overwrite user-set values\n+ if ak_key in attrs and attrs[ak_key]:\n+ continue\n+ attrs[ak_key] = config.get(oidc_key, \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n \n # Prefer user-entered URL to inferred URL to default URL\n", "issue": "Federation & Social Logins // OAuth - Azure AD // Unable to save settings\n**Describe the bug**\r\nSee https://github.com/goauthentik/authentik/issues/7744\r\nProblem still exists with release 2023.10.5\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nSee https://github.com/goauthentik/authentik/issues/7744\r\n\r\nNeither on initial save or update afterwards 'Authorization URL', 'Access token URL ' and 'Profile URL' are changed\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\nSeem fine\r\nserver:\r\n2023-12-22 11:30:41 {\"action\": \"model_updated\", \"auth_via\": \"session\", \"client_ip\": \"172.18.0.1\", \"context\": {\"http_request\": {\"args\": {}, \"method\": \"PATCH\", \"path\": \"/api/v3/sources/oauth/azure-ad/\", \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0\"}, \"model\": {\"app\": \"authentik_sources_oauth\", \"model_name\": \"oauthsource\", \"name\": \"azure ad\", \"pk\": \"bda8c9beeac54e5495c8270838e7a797\"}}, \"event\": \"Created Event\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.events.models\", \"pid\": 40, \"request_id\": \"ff436debac13400e94274999b5f5d06d\", \"timestamp\": \"2023-12-22T10:30:41.555035\", \"user\": {\"email\": \"[email protected]\", \"pk\": 3, \"username\": \"Admin\"}}\r\n2023-12-22 11:30:41 {\"auth_via\": \"session\", \"event\": \"Task published\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 40, \"request_id\": \"ff436debac13400e94274999b5f5d06d\", \"task_id\": \"4ebe0d05335f4700b8c0871d6f10235c\", \"task_name\": \"authentik.events.tasks.event_notification_handler\", \"timestamp\": \"2023-12-22T10:30:41.568890\"}\r\n2023-12-22 11:30:41 {\"auth_via\": \"session\", \"event\": \"/api/v3/sources/oauth/azure-ad/\", \"host\": \"localhost:8044\", \"level\": \"info\", \"logger\": \"authentik.asgi\", \"method\": \"PATCH\", \"pid\": 40, \"remote\": \"172.18.0.1\", \"request_id\": 
\"ff436debac13400e94274999b5f5d06d\", \"runtime\": 396, \"scheme\": \"http\", \"status\": 200, \"timestamp\": \"2023-12-22T10:30:41.572086\", \"user\": \"Admin\", \"user_agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:120.0) Gecko/20100101 Firefox/120.0\"}\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2021.10.5\r\n- Deployment: docker-compose, helm\r\n\r\n**Additional context**\r\n\r\n\n", "before_files": [{"content": "\"\"\"OAuth Source Serializer\"\"\"\nfrom django.urls.base import reverse_lazy\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field\nfrom requests import RequestException\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.core.api.sources import SourceSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\n\n\nclass SourceTypeSerializer(PassiveSerializer):\n \"\"\"Serializer for SourceType\"\"\"\n\n name = CharField(required=True)\n verbose_name = CharField(required=True)\n urls_customizable = BooleanField()\n request_token_url = CharField(read_only=True, allow_null=True)\n authorization_url = CharField(read_only=True, allow_null=True)\n access_token_url = CharField(read_only=True, allow_null=True)\n profile_url = CharField(read_only=True, allow_null=True)\n oidc_well_known_url = CharField(read_only=True, allow_null=True)\n oidc_jwks_url = CharField(read_only=True, allow_null=True)\n\n\nclass OAuthSourceSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n provider_type = ChoiceField(choices=registry.get_name_tuple())\n callback_url = SerializerMethodField()\n type = SerializerMethodField()\n\n def get_callback_url(self, instance: OAuthSource) -> str:\n \"\"\"Get OAuth Callback URL\"\"\"\n relative_url = reverse_lazy(\n \"authentik_sources_oauth:oauth-client-callback\",\n kwargs={\"source_slug\": instance.slug},\n )\n if \"request\" not in self.context:\n return relative_url\n return self.context[\"request\"].build_absolute_uri(relative_url)\n\n @extend_schema_field(SourceTypeSerializer)\n def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n\n well_known = attrs.get(\"oidc_well_known_url\") or source_type.oidc_well_known_url\n inferred_oidc_jwks_url = None\n\n if well_known and well_known != \"\":\n try:\n well_known_config = session.get(well_known)\n well_known_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_well_known_url\": text})\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise 
ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n attrs[\"authorization_url\"] = config.get(\"authorization_endpoint\", \"\")\n attrs[\"access_token_url\"] = config.get(\"token_endpoint\", \"\")\n attrs[\"profile_url\"] = config.get(\"userinfo_endpoint\", \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n\n # Prefer user-entered URL to inferred URL to default URL\n jwks_url = attrs.get(\"oidc_jwks_url\") or inferred_oidc_jwks_url or source_type.oidc_jwks_url\n if jwks_url and jwks_url != \"\":\n attrs[\"oidc_jwks_url\"] = jwks_url\n try:\n jwks_config = session.get(jwks_url)\n jwks_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_jwks_url\": text})\n config = jwks_config.json()\n attrs[\"oidc_jwks\"] = config\n\n provider_type = registry.find_type(attrs.get(\"provider_type\", \"\"))\n for url in [\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n ]:\n if getattr(provider_type, url, None) is None:\n if url not in attrs:\n raise ValidationError(\n f\"{url} is required for provider {provider_type.verbose_name}\"\n )\n return attrs\n\n class Meta:\n model = OAuthSource\n fields = SourceSerializer.Meta.fields + [\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"consumer_secret\",\n \"callback_url\",\n \"additional_scopes\",\n \"type\",\n \"oidc_well_known_url\",\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n extra_kwargs = {\"consumer_secret\": {\"write_only\": True}}\n\n\nclass OAuthSourceFilter(FilterSet):\n \"\"\"OAuth Source filter set\"\"\"\n\n has_jwks = BooleanFilter(label=\"Only return sources with JWKS data\", method=\"filter_has_jwks\")\n\n def filter_has_jwks(self, queryset, name, value): # pragma: no cover\n \"\"\"Only return sources with JWKS data\"\"\"\n return queryset.exclude(oidc_jwks__iexact=\"{}\")\n\n class Meta:\n model = OAuthSource\n fields = [\n \"name\",\n \"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"additional_scopes\",\n ]\n\n\nclass OAuthSourceViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Source Viewset\"\"\"\n\n queryset = OAuthSource.objects.all()\n serializer_class = OAuthSourceSerializer\n lookup_field = \"slug\"\n filterset_class = OAuthSourceFilter\n search_fields = [\"name\", \"slug\"]\n ordering = [\"name\"]\n\n @extend_schema(\n responses={200: SourceTypeSerializer(many=True)},\n parameters=[\n OpenApiParameter(\n name=\"name\",\n location=OpenApiParameter.QUERY,\n type=OpenApiTypes.STR,\n )\n ],\n )\n @action(detail=False, pagination_class=None, filter_backends=[])\n def source_types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types. If ?name is set, only returns the type for <name>.\n If <name> isn't found, returns the default type.\"\"\"\n data = []\n if \"name\" in request.query_params:\n source_type = registry.find_type(request.query_params.get(\"name\"))\n if source_type.__class__ != SourceType:\n data.append(SourceTypeSerializer(source_type).data)\n else:\n for source_type in registry.get():\n data.append(SourceTypeSerializer(source_type).data)\n return Response(data)\n", "path": "authentik/sources/oauth/api/source.py"}]} | 3,494 | 353 |
gh_patches_debug_26300 | rasdani/github-patches | git_diff | City-of-Helsinki__linkedevents-155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Helsinki servicemap in adding Place in LE admin
</issue>
<code>
[start of linkedevents/settings.py]
1 """
2 Django base settings for linkedevents project.
3
4 For more information on this file, see
5 https://docs.djangoproject.com/en/1.6/topics/settings/
6
7 For the full list of settings and their values, see
8 https://docs.djangoproject.com/en/1.6/ref/settings/
9 """
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 import os
13 BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
14
15 # Quick-start development settings - unsuitable for production
16 # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
17
18 DEBUG = False
19
20 TEMPLATE_DEBUG = False
21
22 ALLOWED_HOSTS = []
23
24 SITE_ID = 1
25
26 # Application definition
27
28 INSTALLED_APPS = (
29 'helusers',
30 'django.contrib.sites',
31 'modeltranslation',
32 'django.contrib.admin',
33 'django.contrib.auth',
34 'django.contrib.contenttypes',
35 'django.contrib.sessions',
36 'django.contrib.messages',
37 'django.contrib.staticfiles',
38 'django.contrib.gis',
39 'django.contrib.postgres',
40 'django_extensions',
41 'events',
42 'corsheaders',
43 'rest_framework',
44 'rest_framework_jwt',
45 'mptt',
46 'reversion',
47 'haystack',
48 'raven.contrib.django.raven_compat',
49 'django_cleanup',
50
51 'allauth',
52 'allauth.account',
53 'allauth.socialaccount',
54 'helusers.providers.helsinki',
55
56 'helevents',
57 'munigeo',
58 )
59
60 MIDDLEWARE_CLASSES = (
61 'django.contrib.sessions.middleware.SessionMiddleware',
62 'django.middleware.common.CommonMiddleware',
63 'django.middleware.csrf.CsrfViewMiddleware',
64 'django.contrib.auth.middleware.AuthenticationMiddleware',
65 'corsheaders.middleware.CorsMiddleware',
66 'reversion.middleware.RevisionMiddleware',
67 'django.contrib.messages.middleware.MessageMiddleware',
68 'django.middleware.clickjacking.XFrameOptionsMiddleware',
69 )
70
71 ROOT_URLCONF = 'linkedevents.urls'
72
73 WSGI_APPLICATION = 'linkedevents.wsgi.application'
74
75 # Database
76 # https://docs.djangoproject.com/en/1.6/ref/settings/#databases
77
78 DATABASES = {
79 'default': {
80 'ENGINE': 'django.contrib.gis.db.backends.postgis',
81 'NAME': 'linkedevents',
82 }
83 }
84
85 # Internationalization
86 # https://docs.djangoproject.com/en/1.6/topics/i18n/
87
88 LANGUAGES = (
89 ('fi', 'Finnish'),
90 ('sv', 'Swedish'),
91 ('en', 'English'),
92 )
93
94 LANGUAGE_CODE = 'fi'
95
96 TIME_ZONE = 'Europe/Helsinki'
97
98 SYSTEM_DATA_SOURCE_ID = 'system'
99
100
101 USE_I18N = True
102 USE_L10N = True
103 USE_TZ = True
104
105 LOCALE_PATHS = (
106 os.path.join(BASE_DIR, 'locale'),
107 )
108
109 IMPORT_FILE_PATH = os.path.join(BASE_DIR, 'data')
110
111 # Static files (CSS, JavaScript, Images)
112 # https://docs.djangoproject.com/en/1.6/howto/static-files/
113
114 STATIC_URL = '/static/'
115
116 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
117 MEDIA_URL = '/media/'
118
119 #
120 # Authentication
121 #
122 AUTH_USER_MODEL = 'helevents.User'
123 AUTHENTICATION_BACKENDS = (
124 'django.contrib.auth.backends.ModelBackend',
125 'allauth.account.auth_backends.AuthenticationBackend',
126 )
127 SOCIALACCOUNT_PROVIDERS = {
128 'helsinki': {
129 'VERIFIED_EMAIL': True
130 }
131 }
132 LOGIN_REDIRECT_URL = '/'
133 ACCOUNT_LOGOUT_ON_GET = True
134 SOCIALACCOUNT_ADAPTER = 'helusers.providers.helsinki.provider.SocialAccountAdapter'
135
136 #
137 # REST Framework
138 #
139 REST_FRAMEWORK = {
140 'PAGE_SIZE': 20,
141 'ORDERING_PARAM': 'sort',
142 'DEFAULT_RENDERER_CLASSES': (
143 'events.renderers.JSONRenderer',
144 'events.renderers.JSONLDRenderer',
145 'rest_framework.renderers.BrowsableAPIRenderer',
146 ),
147 'DEFAULT_PARSER_CLASSES': (
148 'events.parsers.CamelCaseJSONParser',
149 'events.parsers.JSONLDParser',
150 'rest_framework.parsers.FormParser',
151 'rest_framework.parsers.MultiPartParser',
152 ),
153 'DEFAULT_FILTER_BACKENDS': (
154 'rest_framework.filters.DjangoFilterBackend',
155 ),
156 'DEFAULT_PAGINATION_CLASS': 'events.api_pagination.CustomPagination',
157 'DEFAULT_PERMISSION_CLASSES': (
158 'rest_framework.permissions.IsAuthenticatedOrReadOnly',
159 ),
160 'DEFAULT_AUTHENTICATION_CLASSES': (
161 'helusers.jwt.JWTAuthentication',
162 ),
163 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
164 'VIEW_NAME_FUNCTION': 'events.api.get_view_name',
165 }
166 JWT_AUTH = {
167 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'helusers.jwt.get_user_id_from_payload_handler',
168 # JWT_AUDIENCE and JWT_SECRET_KEY must be set in local_settings.py
169 }
170
171
172 CORS_ORIGIN_ALLOW_ALL = True
173 CSRF_COOKIE_NAME = 'linkedevents-csrftoken'
174 SESSION_COOKIE_NAME = 'linkedevents-sessionid'
175
176 TEMPLATES = [
177 {
178 'BACKEND': 'django.template.backends.django.DjangoTemplates',
179 'DIRS': [
180 os.path.join(BASE_DIR, 'templates'),
181 ],
182 'APP_DIRS': True,
183 'OPTIONS': {
184 'context_processors': [
185 'django.contrib.auth.context_processors.auth',
186 'django.template.context_processors.debug',
187 'django.template.context_processors.i18n',
188 'django.template.context_processors.media',
189 'django.template.context_processors.static',
190 'django.template.context_processors.tz',
191 'django.contrib.messages.context_processors.messages',
192 ],
193 },
194 },
195 ]
196
197
198 POSTGIS_VERSION = (2, 1, 1)
199
200 # Use ETRS-TM35FIN projection by default
201 PROJECTION_SRID=3067
202 # Bounding box of Finland and then some
203
204 BOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]
205
206 CITYSDK_API_SETTINGS = {
207 'CITYSDK_URL': "http://api.tourism.helsinki.citysdk.eu/CitySDK/",
208 'USERNAME': 'admin',
209 'PASSWORD': 'defaultCitySDKPassword',
210 'SRS_URL': 'http://www.opengis.net/def/crs/EPSG/0/%d' % PROJECTION_SRID,
211 'DEFAULT_POI_CATEGORY': '53562f3238653c0a842a3bf7'
212 }
213
214 HAYSTACK_CONNECTIONS = {
215 'default': {
216 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
217 },
218 'default-fi': {
219 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
220 },
221 'default-en': {
222 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
223 },
224 'default-sv': {
225 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
226 }
227 }
228
229 import bleach
230 BLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + ["p", "div"]
231
232 from easy_thumbnails.conf import Settings as thumbnail_settings
233 THUMBNAIL_PROCESSORS = (
234 'image_cropping.thumbnail_processors.crop_corners',
235 ) + thumbnail_settings.THUMBNAIL_PROCESSORS
236
237 # local_settings.py can be used to override environment-specific settings
238 # like database and email that differ between development and production.
239 f = os.path.join(BASE_DIR, "local_settings.py")
240 if os.path.exists(f):
241 import sys
242 import imp
243 module_name = "%s.local_settings" % ROOT_URLCONF.split('.')[0]
244 module = imp.new_module(module_name)
245 module.__file__ = f
246 sys.modules[module_name] = module
247 exec(open(f, "rb").read())
248
249 if 'SECRET_KEY' not in locals():
250 secret_file = os.path.join(BASE_DIR, '.django_secret')
251 try:
252 SECRET_KEY = open(secret_file).read().strip()
253 except IOError:
254 import random
255 system_random = random.SystemRandom()
256 try:
257 SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)])
258 secret = open(secret_file, 'w')
259 import os
260 os.chmod(secret_file, 0o0600)
261 secret.write(SECRET_KEY)
262 secret.close()
263 except IOError:
264 Exception('Please create a %s file with random characters to generate your secret key!' % secret_file)
265
[end of linkedevents/settings.py]
[start of events/admin.py]
1 from django.conf import settings
2 from django.contrib import admin
3 from django.contrib.admin.widgets import FilteredSelectMultiple
4 from django.contrib.gis import admin as geoadmin
5 from django.contrib.gis.db import models
6 from django.utils.translation import ugettext as _
7 from modeltranslation.admin import TranslationAdmin
8 from reversion.admin import VersionAdmin
9 from events.api import generate_id
10 from events.models import Event, Keyword, Place, Language, \
11 OpeningHoursSpecification, KeywordLabel, Organization, License
12
13
14 class BaseAdmin(admin.ModelAdmin):
15 exclude = ("created_by", "modified_by",)
16
17 def save_model(self, request, obj, form, change):
18 if obj.pk is None:
19 obj.created_by = request.user
20 else:
21 obj.modified_by = request.user
22 obj.save()
23
24
25 class EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
26 pass
27
28
29 class KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
30 pass
31
32
33 class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,
34 VersionAdmin):
35 fieldsets = (
36 (None, {
37 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')
38
39 }),
40 (_('Contact info'), {
41 'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region',
42 'postal_code', 'post_office_box_num')
43 }),
44 )
45
46 def __init__(self, model, admin_site):
47 super().__init__(model, admin_site)
48 # use https CDN instead
49 self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'
50
51 def save_model(self, request, obj, form, change):
52 system_id = settings.SYSTEM_DATA_SOURCE_ID
53 obj.data_source_id = system_id
54 if not obj.id:
55 obj.id = generate_id(system_id)
56 obj.origin_id = obj.id.split(':')[1]
57
58 super().save_model(request, obj, form, change)
59
60 admin.site.register(Place, PlaceAdmin)
61
62
63 class OrganizationAdmin(BaseAdmin):
64 list_display = ('name', 'nr_org_admins')
65
66 formfield_overrides = {
67 models.ManyToManyField: {'widget': FilteredSelectMultiple("ylläpitäjät", is_stacked=False)},
68 }
69 fields = ('admin_users',)
70
71 def nr_org_admins(self, obj):
72 return obj.admin_users.count()
73 nr_org_admins.short_description = _('Admins')
74
75 admin.site.register(Organization, OrganizationAdmin)
76
77
78 class LanguageAdmin(BaseAdmin, VersionAdmin):
79 pass
80
81
82 class PersonAdmin(BaseAdmin, VersionAdmin):
83 pass
84
85
86 class LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
87 def get_readonly_fields(self, request, obj=None):
88 if obj:
89 return ['id']
90 else:
91 return []
92
93 admin.site.register(License, LicenseAdmin)
94
[end of events/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/events/admin.py b/events/admin.py
--- a/events/admin.py
+++ b/events/admin.py
@@ -1,9 +1,9 @@
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.gis import admin as geoadmin
from django.contrib.gis.db import models
from django.utils.translation import ugettext as _
+from leaflet.admin import LeafletGeoAdmin
from modeltranslation.admin import TranslationAdmin
from reversion.admin import VersionAdmin
from events.api import generate_id
@@ -30,8 +30,16 @@
pass
-class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,
- VersionAdmin):
+class HelsinkiGeoAdmin(LeafletGeoAdmin):
+ settings_overrides = {
+ 'DEFAULT_CENTER': (60.171944, 24.941389),
+ 'DEFAULT_ZOOM': 11,
+ 'MIN_ZOOM': 3,
+ 'MAX_ZOOM': 19,
+ }
+
+
+class PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin):
fieldsets = (
(None, {
'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')
diff --git a/linkedevents/settings.py b/linkedevents/settings.py
--- a/linkedevents/settings.py
+++ b/linkedevents/settings.py
@@ -55,6 +55,7 @@
'helevents',
'munigeo',
+ 'leaflet',
)
MIDDLEWARE_CLASSES = (
| {"golden_diff": "diff --git a/events/admin.py b/events/admin.py\n--- a/events/admin.py\n+++ b/events/admin.py\n@@ -1,9 +1,9 @@\n from django.conf import settings\n from django.contrib import admin\n from django.contrib.admin.widgets import FilteredSelectMultiple\n-from django.contrib.gis import admin as geoadmin\n from django.contrib.gis.db import models\n from django.utils.translation import ugettext as _\n+from leaflet.admin import LeafletGeoAdmin\n from modeltranslation.admin import TranslationAdmin\n from reversion.admin import VersionAdmin\n from events.api import generate_id\n@@ -30,8 +30,16 @@\n pass\n \n \n-class PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,\n- VersionAdmin):\n+class HelsinkiGeoAdmin(LeafletGeoAdmin):\n+ settings_overrides = {\n+ 'DEFAULT_CENTER': (60.171944, 24.941389),\n+ 'DEFAULT_ZOOM': 11,\n+ 'MIN_ZOOM': 3,\n+ 'MAX_ZOOM': 19,\n+ }\n+\n+\n+class PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin):\n fieldsets = (\n (None, {\n 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')\ndiff --git a/linkedevents/settings.py b/linkedevents/settings.py\n--- a/linkedevents/settings.py\n+++ b/linkedevents/settings.py\n@@ -55,6 +55,7 @@\n \n 'helevents',\n 'munigeo',\n+ 'leaflet',\n )\n \n MIDDLEWARE_CLASSES = (\n", "issue": "Use Helsinki servicemap in adding Place in LE admin\n\n", "before_files": [{"content": "\"\"\"\nDjango base settings for linkedevents project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = []\n\nSITE_ID = 1\n\n# Application definition\n\nINSTALLED_APPS = (\n 'helusers',\n 'django.contrib.sites',\n 'modeltranslation',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'django.contrib.postgres',\n 'django_extensions',\n 'events',\n 'corsheaders',\n 'rest_framework',\n 'rest_framework_jwt',\n 'mptt',\n 'reversion',\n 'haystack',\n 'raven.contrib.django.raven_compat',\n 'django_cleanup',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'helusers.providers.helsinki',\n\n 'helevents',\n 'munigeo',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'reversion.middleware.RevisionMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'linkedevents.urls'\n\nWSGI_APPLICATION = 'linkedevents.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'linkedevents',\n }\n}\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGES = (\n ('fi', 'Finnish'),\n ('sv', 'Swedish'),\n ('en', 'English'),\n)\n\nLANGUAGE_CODE = 'fi'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nSYSTEM_DATA_SOURCE_ID = 'system'\n\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nIMPORT_FILE_PATH = os.path.join(BASE_DIR, 'data')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\n#\n# Authentication\n#\nAUTH_USER_MODEL = 'helevents.User'\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\nSOCIALACCOUNT_PROVIDERS = {\n 'helsinki': {\n 'VERIFIED_EMAIL': True\n }\n}\nLOGIN_REDIRECT_URL = '/'\nACCOUNT_LOGOUT_ON_GET = True\nSOCIALACCOUNT_ADAPTER = 'helusers.providers.helsinki.provider.SocialAccountAdapter'\n\n#\n# REST Framework\n#\nREST_FRAMEWORK = {\n 'PAGE_SIZE': 20,\n 'ORDERING_PARAM': 'sort',\n 'DEFAULT_RENDERER_CLASSES': (\n 'events.renderers.JSONRenderer',\n 'events.renderers.JSONLDRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'events.parsers.CamelCaseJSONParser',\n 'events.parsers.JSONLDParser',\n 'rest_framework.parsers.FormParser',\n 'rest_framework.parsers.MultiPartParser',\n ),\n 'DEFAULT_FILTER_BACKENDS': (\n 'rest_framework.filters.DjangoFilterBackend',\n ),\n 'DEFAULT_PAGINATION_CLASS': 'events.api_pagination.CustomPagination',\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'helusers.jwt.JWTAuthentication',\n ),\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'VIEW_NAME_FUNCTION': 'events.api.get_view_name',\n}\nJWT_AUTH = {\n 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'helusers.jwt.get_user_id_from_payload_handler',\n # JWT_AUDIENCE and JWT_SECRET_KEY must be set in local_settings.py\n}\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nCSRF_COOKIE_NAME = 'linkedevents-csrftoken'\nSESSION_COOKIE_NAME = 'linkedevents-sessionid'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nPOSTGIS_VERSION = (2, 1, 1)\n\n# Use ETRS-TM35FIN projection by default\nPROJECTION_SRID=3067\n# Bounding box of Finland and then some\n\nBOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]\n\nCITYSDK_API_SETTINGS = {\n 'CITYSDK_URL': \"http://api.tourism.helsinki.citysdk.eu/CitySDK/\",\n 'USERNAME': 'admin',\n 'PASSWORD': 'defaultCitySDKPassword',\n 'SRS_URL': 'http://www.opengis.net/def/crs/EPSG/0/%d' % PROJECTION_SRID,\n 'DEFAULT_POI_CATEGORY': '53562f3238653c0a842a3bf7'\n}\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-fi': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-en': {\n 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n },\n 'default-sv': {\n 
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'\n }\n}\n\nimport bleach\nBLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + [\"p\", \"div\"]\n\nfrom easy_thumbnails.conf import Settings as thumbnail_settings\nTHUMBNAIL_PROCESSORS = (\n 'image_cropping.thumbnail_processors.crop_corners',\n) + thumbnail_settings.THUMBNAIL_PROCESSORS\n\n# local_settings.py can be used to override environment-specific settings\n# like database and email that differ between development and production.\nf = os.path.join(BASE_DIR, \"local_settings.py\")\nif os.path.exists(f):\n import sys\n import imp\n module_name = \"%s.local_settings\" % ROOT_URLCONF.split('.')[0]\n module = imp.new_module(module_name)\n module.__file__ = f\n sys.modules[module_name] = module\n exec(open(f, \"rb\").read())\n\nif 'SECRET_KEY' not in locals():\n secret_file = os.path.join(BASE_DIR, '.django_secret')\n try:\n SECRET_KEY = open(secret_file).read().strip()\n except IOError:\n import random\n system_random = random.SystemRandom()\n try:\n SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)])\n secret = open(secret_file, 'w')\n import os\n os.chmod(secret_file, 0o0600)\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception('Please create a %s file with random characters to generate your secret key!' % secret_file)\n", "path": "linkedevents/settings.py"}, {"content": "from django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.gis import admin as geoadmin\nfrom django.contrib.gis.db import models\nfrom django.utils.translation import ugettext as _\nfrom modeltranslation.admin import TranslationAdmin\nfrom reversion.admin import VersionAdmin\nfrom events.api import generate_id\nfrom events.models import Event, Keyword, Place, Language, \\\n OpeningHoursSpecification, KeywordLabel, Organization, License\n\n\nclass BaseAdmin(admin.ModelAdmin):\n exclude = (\"created_by\", \"modified_by\",)\n\n def save_model(self, request, obj, form, change):\n if obj.pk is None:\n obj.created_by = request.user\n else:\n obj.modified_by = request.user\n obj.save()\n\n\nclass EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n pass\n\n\nclass PlaceAdmin(geoadmin.GeoModelAdmin, BaseAdmin, TranslationAdmin,\n VersionAdmin):\n fieldsets = (\n (None, {\n 'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'parent')\n\n }),\n (_('Contact info'), {\n 'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region',\n 'postal_code', 'post_office_box_num')\n }),\n )\n\n def __init__(self, model, admin_site):\n super().__init__(model, admin_site)\n # use https CDN instead\n self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'\n\n def save_model(self, request, obj, form, change):\n system_id = settings.SYSTEM_DATA_SOURCE_ID\n obj.data_source_id = system_id\n if not obj.id:\n obj.id = generate_id(system_id)\n obj.origin_id = obj.id.split(':')[1]\n\n super().save_model(request, obj, form, change)\n\nadmin.site.register(Place, PlaceAdmin)\n\n\nclass OrganizationAdmin(BaseAdmin):\n list_display = ('name', 'nr_org_admins')\n\n formfield_overrides = {\n models.ManyToManyField: {'widget': FilteredSelectMultiple(\"yll\u00e4pit\u00e4j\u00e4t\", is_stacked=False)},\n }\n fields = ('admin_users',)\n\n def 
nr_org_admins(self, obj):\n return obj.admin_users.count()\n nr_org_admins.short_description = _('Admins')\n\nadmin.site.register(Organization, OrganizationAdmin)\n\n\nclass LanguageAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass PersonAdmin(BaseAdmin, VersionAdmin):\n pass\n\n\nclass LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):\n def get_readonly_fields(self, request, obj=None):\n if obj:\n return ['id']\n else:\n return []\n\nadmin.site.register(License, LicenseAdmin)\n", "path": "events/admin.py"}]} | 3,873 | 365 |
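The patch above replaces Django's OpenLayers-based `GeoModelAdmin` with django-leaflet's `LeafletGeoAdmin` and centres the admin map on Helsinki through per-admin `settings_overrides`. If the same defaults were wanted for every Leaflet map in the project, django-leaflet can also read them from a global `LEAFLET_CONFIG` dict in settings; the snippet below is a sketch of that alternative, not part of the patch.

```python
# Sketch: project-wide django-leaflet defaults in settings.py, mirroring the
# per-admin overrides introduced by the patch above.
LEAFLET_CONFIG = {
    'DEFAULT_CENTER': (60.171944, 24.941389),  # Helsinki
    'DEFAULT_ZOOM': 11,
    'MIN_ZOOM': 3,
    'MAX_ZOOM': 19,
}
```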
gh_patches_debug_29594 | rasdani/github-patches | git_diff | fossasia__open-event-server-6739 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove version model
**Describe the bug**
The version model is not used currently and should be removed
https://github.com/fossasia/open-event-server/blob/development/app/models/version.py
**Additional context**
@iamareebjamal Taking this
</issue>
<code>
[start of app/models/version.py]
1 from sqlalchemy.orm import backref
2
3 from app.models import db
4
5
6 class Version(db.Model):
7 """Version model class"""
8 __tablename__ = 'versions'
9 id = db.Column(db.Integer, primary_key=True)
10 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
11 events = db.relationship("Event", backref=backref('version', uselist=False))
12
13 event_ver = db.Column(db.Integer, nullable=False, default=0)
14 sessions_ver = db.Column(db.Integer, nullable=False, default=0)
15 speakers_ver = db.Column(db.Integer, nullable=False, default=0)
16 tracks_ver = db.Column(db.Integer, nullable=False, default=0)
17 sponsors_ver = db.Column(db.Integer, nullable=False, default=0)
18 microlocations_ver = db.Column(db.Integer, nullable=False, default=0)
19
20 def __init__(self,
21 event_id=None,
22 event_ver=None,
23 sessions_ver=None,
24 speakers_ver=None,
25 tracks_ver=None,
26 sponsors_ver=None,
27 microlocations_ver=None):
28 self.event_id = event_id
29 self.event_ver = event_ver
30 self.sessions_ver = sessions_ver
31 self.speakers_ver = speakers_ver
32 self.tracks_ver = tracks_ver
33 self.sponsors_ver = sponsors_ver
34 self.microlocations_ver = microlocations_ver
35
36 def __repr__(self):
37 return '<Version %r>' % self.id
38
39 def __str__(self):
40 return self.__repr__()
41
42 @property
43 def serialize(self):
44 """Return object data in easily serializable format"""
45 return {
46 'version': [
47 {'id': self.id,
48 'event_id': self.event_id,
49 'event_ver': self.event_ver,
50 'sessions_ver': self.sessions_ver,
51 'speakers_ver': self.speakers_ver,
52 'tracks_ver': self.tracks_ver,
53 'sponsors_ver': self.sponsors_ver,
54 'microlocations_ver': self.microlocations_ver}
55 ]
56 }
57
[end of app/models/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/models/version.py b/app/models/version.py
deleted file mode 100644
--- a/app/models/version.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from sqlalchemy.orm import backref
-
-from app.models import db
-
-
-class Version(db.Model):
- """Version model class"""
- __tablename__ = 'versions'
- id = db.Column(db.Integer, primary_key=True)
- event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
- events = db.relationship("Event", backref=backref('version', uselist=False))
-
- event_ver = db.Column(db.Integer, nullable=False, default=0)
- sessions_ver = db.Column(db.Integer, nullable=False, default=0)
- speakers_ver = db.Column(db.Integer, nullable=False, default=0)
- tracks_ver = db.Column(db.Integer, nullable=False, default=0)
- sponsors_ver = db.Column(db.Integer, nullable=False, default=0)
- microlocations_ver = db.Column(db.Integer, nullable=False, default=0)
-
- def __init__(self,
- event_id=None,
- event_ver=None,
- sessions_ver=None,
- speakers_ver=None,
- tracks_ver=None,
- sponsors_ver=None,
- microlocations_ver=None):
- self.event_id = event_id
- self.event_ver = event_ver
- self.sessions_ver = sessions_ver
- self.speakers_ver = speakers_ver
- self.tracks_ver = tracks_ver
- self.sponsors_ver = sponsors_ver
- self.microlocations_ver = microlocations_ver
-
- def __repr__(self):
- return '<Version %r>' % self.id
-
- def __str__(self):
- return self.__repr__()
-
- @property
- def serialize(self):
- """Return object data in easily serializable format"""
- return {
- 'version': [
- {'id': self.id,
- 'event_id': self.event_id,
- 'event_ver': self.event_ver,
- 'sessions_ver': self.sessions_ver,
- 'speakers_ver': self.speakers_ver,
- 'tracks_ver': self.tracks_ver,
- 'sponsors_ver': self.sponsors_ver,
- 'microlocations_ver': self.microlocations_ver}
- ]
- }
| {"golden_diff": "diff --git a/app/models/version.py b/app/models/version.py\ndeleted file mode 100644\n--- a/app/models/version.py\n+++ /dev/null\n@@ -1,56 +0,0 @@\n-from sqlalchemy.orm import backref\n-\n-from app.models import db\n-\n-\n-class Version(db.Model):\n- \"\"\"Version model class\"\"\"\n- __tablename__ = 'versions'\n- id = db.Column(db.Integer, primary_key=True)\n- event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n- events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n-\n- event_ver = db.Column(db.Integer, nullable=False, default=0)\n- sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n- speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n- tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n- sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n- microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n-\n- def __init__(self,\n- event_id=None,\n- event_ver=None,\n- sessions_ver=None,\n- speakers_ver=None,\n- tracks_ver=None,\n- sponsors_ver=None,\n- microlocations_ver=None):\n- self.event_id = event_id\n- self.event_ver = event_ver\n- self.sessions_ver = sessions_ver\n- self.speakers_ver = speakers_ver\n- self.tracks_ver = tracks_ver\n- self.sponsors_ver = sponsors_ver\n- self.microlocations_ver = microlocations_ver\n-\n- def __repr__(self):\n- return '<Version %r>' % self.id\n-\n- def __str__(self):\n- return self.__repr__()\n-\n- @property\n- def serialize(self):\n- \"\"\"Return object data in easily serializable format\"\"\"\n- return {\n- 'version': [\n- {'id': self.id,\n- 'event_id': self.event_id,\n- 'event_ver': self.event_ver,\n- 'sessions_ver': self.sessions_ver,\n- 'speakers_ver': self.speakers_ver,\n- 'tracks_ver': self.tracks_ver,\n- 'sponsors_ver': self.sponsors_ver,\n- 'microlocations_ver': self.microlocations_ver}\n- ]\n- }\n", "issue": "Remove version model\n**Describe the bug**\r\nThe version model is not used currently and should be removed\r\n\r\nhttps://github.com/fossasia/open-event-server/blob/development/app/models/version.py\r\n\r\n\r\n**Additional context**\r\n@iamareebjamal Taking this\r\n\n", "before_files": [{"content": "from sqlalchemy.orm import backref\n\nfrom app.models import db\n\n\nclass Version(db.Model):\n \"\"\"Version model class\"\"\"\n __tablename__ = 'versions'\n id = db.Column(db.Integer, primary_key=True)\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))\n events = db.relationship(\"Event\", backref=backref('version', uselist=False))\n\n event_ver = db.Column(db.Integer, nullable=False, default=0)\n sessions_ver = db.Column(db.Integer, nullable=False, default=0)\n speakers_ver = db.Column(db.Integer, nullable=False, default=0)\n tracks_ver = db.Column(db.Integer, nullable=False, default=0)\n sponsors_ver = db.Column(db.Integer, nullable=False, default=0)\n microlocations_ver = db.Column(db.Integer, nullable=False, default=0)\n\n def __init__(self,\n event_id=None,\n event_ver=None,\n sessions_ver=None,\n speakers_ver=None,\n tracks_ver=None,\n sponsors_ver=None,\n microlocations_ver=None):\n self.event_id = event_id\n self.event_ver = event_ver\n self.sessions_ver = sessions_ver\n self.speakers_ver = speakers_ver\n self.tracks_ver = tracks_ver\n self.sponsors_ver = sponsors_ver\n self.microlocations_ver = microlocations_ver\n\n def __repr__(self):\n return '<Version %r>' % self.id\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def serialize(self):\n 
\"\"\"Return object data in easily serializable format\"\"\"\n return {\n 'version': [\n {'id': self.id,\n 'event_id': self.event_id,\n 'event_ver': self.event_ver,\n 'sessions_ver': self.sessions_ver,\n 'speakers_ver': self.speakers_ver,\n 'tracks_ver': self.tracks_ver,\n 'sponsors_ver': self.sponsors_ver,\n 'microlocations_ver': self.microlocations_ver}\n ]\n }\n", "path": "app/models/version.py"}]} | 1,123 | 525 |
gh_patches_debug_64623 | rasdani/github-patches | git_diff | quantumlib__Cirq-423 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MergeInteractions optimization is creating bad circuits
Not sure how this made it past the tests yet.
```python
def main():
circuit = make_inefficient_circuit()
print("BEFORE:")
print(circuit.to_unitary_matrix().round(3))
print()
cirq.google.MergeInteractions().optimize_circuit(circuit)
print("AFTER:")
print(circuit.to_unitary_matrix().round(3))
def make_inefficient_circuit(t=0.1, v=0.11):
from cirq import H, CNOT, Z
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
return cirq.Circuit.from_ops(
H(b),
CNOT(a, b),
H(b),
CNOT(a, b),
CNOT(b, a),
H(a),
CNOT(a, b),
Z(a)**t, Z(b)**-t,
CNOT(a, b),
H(a), Z(b)**v,
CNOT(a, b),
Z(a)**-v, Z(b)**-v)
```
```
BEFORE:
[[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ]
[-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ]
[ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ]
[-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]]
AFTER:
[[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j]
[-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j]
[-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j]
[-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]]
```
</issue>
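The reproduction script above compares the circuit unitary before and after the pass by eye; a pass of this kind typically only preserves the unitary up to a global phase, so an automated regression check has to compare the two matrices up to a phase factor. A small helper along those lines is sketched below — the function is illustrative and not Cirq API.

```python
# Sketch: check that an optimization pass preserved the unitary up to global phase.
import numpy as np


def equal_up_to_global_phase(u: np.ndarray, v: np.ndarray, atol: float = 1e-6) -> bool:
    # Align the phases on the largest-magnitude entry of u, then compare elementwise.
    k = np.unravel_index(np.argmax(np.abs(u)), u.shape)
    if abs(u[k]) < atol or abs(v[k]) < atol:
        return np.allclose(u, v, atol=atol)
    phase = v[k] / u[k]
    if not np.isclose(abs(phase), 1.0, atol=atol):
        return False
    return np.allclose(u * phase, v, atol=atol)
```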
<code>
[start of cirq/google/merge_interactions.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """An optimization pass that combines adjacent single-qubit rotations."""
16
17 from typing import List, Tuple, Optional, cast
18
19 import numpy as np
20
21 from cirq import ops
22 from cirq.circuits import (
23 Circuit,
24 PointOptimizer,
25 PointOptimizationSummary,
26 )
27 from cirq.extension import Extensions
28 from cirq.google.decompositions import two_qubit_matrix_to_native_gates
29
30
31 class MergeInteractions(PointOptimizer):
32 """Combines adjacent constant single-qubit rotations."""
33
34 def __init__(self,
35 tolerance: float = 1e-8,
36 allow_partial_czs: bool = True,
37 extensions: Extensions = None) -> None:
38 self.tolerance = tolerance
39 self.allow_partial_czs = allow_partial_czs
40 self.extensions = extensions or Extensions()
41
42 def optimization_at(self, circuit, index, op):
43 if len(op.qubits) != 2:
44 return None
45
46 interaction_count, indices, matrix = (
47 self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))
48 if interaction_count <= 1:
49 return None
50
51 # Find a max-3-cz construction.
52 operations = two_qubit_matrix_to_native_gates(
53 op.qubits[0],
54 op.qubits[1],
55 matrix,
56 self.allow_partial_czs,
57 self.tolerance)
58
59 # TODO: don't replace if there's no benefit in CZ depth.
60
61 return PointOptimizationSummary(
62 clear_span=max(indices) + 1 - index,
63 clear_qubits=op.qubits,
64 new_operations=operations)
65
66 def _op_to_matrix(self,
67 op: ops.Operation,
68 qubits: Tuple[ops.QubitId, ...]
69 ) -> Optional[Tuple[np.ndarray, bool]]:
70 """Determines the effect of an operation on the given qubits.
71
72 The operation must be a 1-qubit operation on one of the given qubits,
73 or a 2-qubit operation on both of the given qubits. Also, the operation
74 must have a known matrix. Otherwise None is returned.
75
76 Args:
77 op: The operation to understand.
78 qubits: The qubits we care about. Order determines matrix tensor
79 order.
80
81 Returns:
82 None, or else a tuple containing a matrix equivalent to the effect
83 of the operation and a boolean indicating if the operation is a
84 2-qubit interaction.
85 """
86 q1, q2 = qubits
87
88 known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)
89 if known is None:
90 return None
91 m = known.matrix()
92
93 if op.qubits == qubits:
94 return m, True
95 if op.qubits == (q2, q1):
96 return MergeInteractions._flip_kron_order(m), True
97 if op.qubits == (q1,):
98 return np.kron(np.eye(2), m), False
99 if op.qubits == (q2,):
100 return np.kron(m, np.eye(2)), False
101
102 return None
103
104 def _scan_two_qubit_ops_into_matrix(
105 self,
106 circuit: Circuit,
107 index: Optional[int],
108 qubits: Tuple[ops.QubitId, ...]
109 ) -> Tuple[int, List[int], np.ndarray]:
110 """Accumulates operations affecting the given pair of qubits.
111
112 The scan terminates when it hits the end of the circuit, finds an
113 operation without a known matrix, or finds an operation that interacts
114 the given qubits with other qubits.
115
116 Args:
117 circuit: The circuit to scan for operations.
118 index: The index to start scanning forward from.
119 qubits: The pair of qubits we care about.
120
121 Returns:
122 A tuple containing:
123 0. The number of 2-qubit operations that were scanned.
124 1. The moment indices those operations were on.
125 2. A matrix equivalent to the effect of the scanned operations.
126 """
127
128 product = np.eye(4, dtype=np.complex128)
129 interaction_count = 0
130 touched_indices = []
131
132 while index is not None:
133 operations = {circuit.operation_at(q, index) for q in qubits}
134 op_data = [
135 self._op_to_matrix(op, qubits)
136 for op in operations
137 if op
138 ]
139
140 # Stop at any non-constant or non-local interaction.
141 if any(e is None for e in op_data):
142 break
143 present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)
144
145 for op_mat, interacts in present_op_data:
146 product = np.dot(op_mat, product)
147 if interacts:
148 interaction_count += 1
149
150 touched_indices.append(index)
151 index = circuit.next_moment_operating_on(qubits, index + 1)
152
153 return interaction_count, touched_indices, product
154
155 @staticmethod
156 def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:
157 """Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i))."""
158 result = np.array([[0] * 4] * 4, dtype=np.complex128)
159 order = [0, 2, 1, 3]
160 for i in range(4):
161 for j in range(4):
162 result[order[i], order[j]] = mat4x4[i, j]
163 return result
164
[end of cirq/google/merge_interactions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py
--- a/cirq/google/merge_interactions.py
+++ b/cirq/google/merge_interactions.py
@@ -95,9 +95,9 @@
if op.qubits == (q2, q1):
return MergeInteractions._flip_kron_order(m), True
if op.qubits == (q1,):
- return np.kron(np.eye(2), m), False
- if op.qubits == (q2,):
return np.kron(m, np.eye(2)), False
+ if op.qubits == (q2,):
+ return np.kron(np.eye(2), m), False
return None
| {"golden_diff": "diff --git a/cirq/google/merge_interactions.py b/cirq/google/merge_interactions.py\n--- a/cirq/google/merge_interactions.py\n+++ b/cirq/google/merge_interactions.py\n@@ -95,9 +95,9 @@\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n- return np.kron(np.eye(2), m), False\n- if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n+ if op.qubits == (q2,):\n+ return np.kron(np.eye(2), m), False\n \n return None\n", "issue": "MergeInteractions optimization is creating bad circuits\nNot sure how this made it past the tests yet.\r\n\r\n```python\r\ndef main():\r\n circuit = make_inefficient_circuit()\r\n print(\"BEFORE:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n print()\r\n cirq.google.MergeInteractions().optimize_circuit(circuit)\r\n print(\"AFTER:\")\r\n print(circuit.to_unitary_matrix().round(3))\r\n\r\ndef make_inefficient_circuit(t=0.1, v=0.11):\r\n from cirq import H, CNOT, Z\r\n a = cirq.NamedQubit('a')\r\n b = cirq.NamedQubit('b')\r\n\r\n return cirq.Circuit.from_ops(\r\n H(b),\r\n CNOT(a, b),\r\n H(b),\r\n CNOT(a, b),\r\n CNOT(b, a),\r\n H(a),\r\n CNOT(a, b),\r\n Z(a)**t, Z(b)**-t,\r\n CNOT(a, b),\r\n H(a), Z(b)**v,\r\n CNOT(a, b),\r\n Z(a)**-v, Z(b)**-v)\r\n```\r\n\r\n```\r\nBEFORE:\r\n[[ 1. -0.j 0. -0.j 0. +0.j 0. -0.j ]\r\n [-0. -0.j -0. -0.309j 0.951-0.j -0. +0.j ]\r\n [ 0. -0.j 0.951-0.j -0. -0.309j 0. +0.j ]\r\n [-0. +0.j 0. +0.j 0. -0.j -0.771+0.637j]]\r\n\r\nAFTER:\r\n[[-0.354-0.354j -0.354-0.354j -0.227-0.446j 0.227+0.446j]\r\n [-0.452-0.213j -0.452-0.213j 0.364+0.342j -0.364-0.342j]\r\n [-0.354-0.354j 0.354+0.354j -0.446-0.227j -0.446-0.227j]\r\n [-0.452-0.213j 0.452+0.213j 0.496+0.063j 0.496+0.063j]]\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import List, Tuple, Optional, cast\n\nimport numpy as np\n\nfrom cirq import ops\nfrom cirq.circuits import (\n Circuit,\n PointOptimizer,\n PointOptimizationSummary,\n)\nfrom cirq.extension import Extensions\nfrom cirq.google.decompositions import two_qubit_matrix_to_native_gates\n\n\nclass MergeInteractions(PointOptimizer):\n \"\"\"Combines adjacent constant single-qubit rotations.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n extensions: Extensions = None) -> None:\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n self.extensions = extensions or Extensions()\n\n def optimization_at(self, circuit, index, op):\n if len(op.qubits) != 2:\n return None\n\n interaction_count, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n if interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n operations = two_qubit_matrix_to_native_gates(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance)\n\n # TODO: don't replace if 
there's no benefit in CZ depth.\n\n return PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=operations)\n\n def _op_to_matrix(self,\n op: ops.Operation,\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[Tuple[np.ndarray, bool]]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n The operation must be a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation on both of the given qubits. Also, the operation\n must have a known matrix. Otherwise None is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a tuple containing a matrix equivalent to the effect\n of the operation and a boolean indicating if the operation is a\n 2-qubit interaction.\n \"\"\"\n q1, q2 = qubits\n\n known = self.extensions.try_cast(op.gate, ops.KnownMatrixGate)\n if known is None:\n return None\n m = known.matrix()\n\n if op.qubits == qubits:\n return m, True\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(m), True\n if op.qubits == (q1,):\n return np.kron(np.eye(2), m), False\n if op.qubits == (q2,):\n return np.kron(m, np.eye(2)), False\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[int, List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The number of 2-qubit operations that were scanned.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n interaction_count = 0\n touched_indices = []\n\n while index is not None:\n operations = {circuit.operation_at(q, index) for q in qubits}\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_op_data = cast(List[Tuple[np.ndarray, bool]], op_data)\n\n for op_mat, interacts in present_op_data:\n product = np.dot(op_mat, product)\n if interacts:\n interaction_count += 1\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return interaction_count, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n", "path": "cirq/google/merge_interactions.py"}]} | 2,877 | 166 |
gh_patches_debug_7629 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hook using node doesn't work on FreeBSD
We have a hook using doctoc, the node package. I cannot install the hook; this is the output:
```
desktop1% pre-commit install --install-hooks
pre-commit installed at .git/hooks/pre-commit
[INFO] Initializing environment for [email protected]:jockej/pre-commit-hooks.git.
[INFO] Initializing environment for https://github.com/thlorenz/doctoc.
[INFO] Initializing environment for https://gitlab.com/pycqa/flake8.
[INFO] Initializing environment for https://github.com/jorisroovers/gitlint.
[INFO] Installing environment for https://github.com/thlorenz/doctoc.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/bin/node', '/usr/local/bin/npm', 'install', '-g', '.')
return code: 254
expected return code: 0
stdout: (none)
stderr:
npm ERR! code ENOENT
npm ERR! syscall chmod
npm ERR! path /home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js
npm ERR! errno -2
npm ERR! enoent ENOENT: no such file or directory, chmod '/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js'
npm ERR! enoent This is related to npm not being able to find a file.
npm ERR! enoent
npm ERR! A complete log of this run can be found in:
npm ERR! /home/joakim/.npm/_logs/2020-05-07T10_30_16_218Z-debug.log
Check the log at /home/joakim/.cache/pre-commit/pre-commit.log
```
These hooks all install and run perfectly on GNU/Linux.
I believe it happens because pre-commit tries to use the --prebuilt flag to nodeenv; however, this doesn't work for FreeBSD since there are no prebuilt packages (the URL constructed gives a 404). I have node installed, so it should be possible to use --system, but I haven't been able to make it work myself.
Nodeenv itself seems to work fine with --system.
Please let me know if I can help with trying things on FreeBSD.
</issue>
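The fix recorded for this entry normalizes pre-commit's cache directory with `os.path.realpath`. A rough sketch of the idea — the function name and environment variables mirror pre-commit's `_get_default_directory`, but this is an illustration, not the shipped implementation:

```python
# Illustrative sketch: resolve symlinks in the cache directory path so that
# tools invoked inside it (such as npm) always see the physical location.
import os


def default_cache_dir() -> str:
    ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join(
        os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
        'pre-commit',
    )
    # On FreeBSD, /home is commonly a symlink to /usr/home, so the expanded
    # ~/.cache path may sit behind a symlink; realpath collapses it.
    return os.path.realpath(ret)
```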
<code>
[start of pre_commit/store.py]
1 import contextlib
2 import logging
3 import os.path
4 import sqlite3
5 import tempfile
6 from typing import Callable
7 from typing import Generator
8 from typing import List
9 from typing import Optional
10 from typing import Sequence
11 from typing import Tuple
12
13 import pre_commit.constants as C
14 from pre_commit import file_lock
15 from pre_commit import git
16 from pre_commit.util import CalledProcessError
17 from pre_commit.util import clean_path_on_failure
18 from pre_commit.util import cmd_output_b
19 from pre_commit.util import resource_text
20 from pre_commit.util import rmtree
21
22
23 logger = logging.getLogger('pre_commit')
24
25
26 def _get_default_directory() -> str:
27 """Returns the default directory for the Store. This is intentionally
28 underscored to indicate that `Store.get_default_directory` is the intended
29 way to get this information. This is also done so
30 `Store.get_default_directory` can be mocked in tests and
31 `_get_default_directory` can be tested.
32 """
33 return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
34 os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
35 'pre-commit',
36 )
37
38
39 class Store:
40 get_default_directory = staticmethod(_get_default_directory)
41
42 def __init__(self, directory: Optional[str] = None) -> None:
43 self.directory = directory or Store.get_default_directory()
44 self.db_path = os.path.join(self.directory, 'db.db')
45
46 if not os.path.exists(self.directory):
47 os.makedirs(self.directory, exist_ok=True)
48 with open(os.path.join(self.directory, 'README'), 'w') as f:
49 f.write(
50 'This directory is maintained by the pre-commit project.\n'
51 'Learn more: https://github.com/pre-commit/pre-commit\n',
52 )
53
54 if os.path.exists(self.db_path):
55 return
56 with self.exclusive_lock():
57 # Another process may have already completed this work
58 if os.path.exists(self.db_path): # pragma: no cover (race)
59 return
60 # To avoid a race where someone ^Cs between db creation and
61 # execution of the CREATE TABLE statement
62 fd, tmpfile = tempfile.mkstemp(dir=self.directory)
63 # We'll be managing this file ourselves
64 os.close(fd)
65 with self.connect(db_path=tmpfile) as db:
66 db.executescript(
67 'CREATE TABLE repos ('
68 ' repo TEXT NOT NULL,'
69 ' ref TEXT NOT NULL,'
70 ' path TEXT NOT NULL,'
71 ' PRIMARY KEY (repo, ref)'
72 ');',
73 )
74 self._create_config_table(db)
75
76 # Atomic file move
77 os.rename(tmpfile, self.db_path)
78
79 @contextlib.contextmanager
80 def exclusive_lock(self) -> Generator[None, None, None]:
81 def blocked_cb() -> None: # pragma: no cover (tests are in-process)
82 logger.info('Locking pre-commit directory')
83
84 with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):
85 yield
86
87 @contextlib.contextmanager
88 def connect(
89 self,
90 db_path: Optional[str] = None,
91 ) -> Generator[sqlite3.Connection, None, None]:
92 db_path = db_path or self.db_path
93 # sqlite doesn't close its fd with its contextmanager >.<
94 # contextlib.closing fixes this.
95 # See: https://stackoverflow.com/a/28032829/812183
96 with contextlib.closing(sqlite3.connect(db_path)) as db:
97 # this creates a transaction
98 with db:
99 yield db
100
101 @classmethod
102 def db_repo_name(cls, repo: str, deps: Sequence[str]) -> str:
103 if deps:
104 return f'{repo}:{",".join(sorted(deps))}'
105 else:
106 return repo
107
108 def _new_repo(
109 self,
110 repo: str,
111 ref: str,
112 deps: Sequence[str],
113 make_strategy: Callable[[str], None],
114 ) -> str:
115 repo = self.db_repo_name(repo, deps)
116
117 def _get_result() -> Optional[str]:
118 # Check if we already exist
119 with self.connect() as db:
120 result = db.execute(
121 'SELECT path FROM repos WHERE repo = ? AND ref = ?',
122 (repo, ref),
123 ).fetchone()
124 return result[0] if result else None
125
126 result = _get_result()
127 if result:
128 return result
129 with self.exclusive_lock():
130 # Another process may have already completed this work
131 result = _get_result()
132 if result: # pragma: no cover (race)
133 return result
134
135 logger.info(f'Initializing environment for {repo}.')
136
137 directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)
138 with clean_path_on_failure(directory):
139 make_strategy(directory)
140
141 # Update our db with the created repo
142 with self.connect() as db:
143 db.execute(
144 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',
145 [repo, ref, directory],
146 )
147 return directory
148
149 def _complete_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:
150 """Perform a complete clone of a repository and its submodules """
151
152 git_cmd('fetch', 'origin', '--tags')
153 git_cmd('checkout', ref)
154 git_cmd('submodule', 'update', '--init', '--recursive')
155
156 def _shallow_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:
157 """Perform a shallow clone of a repository and its submodules """
158
159 git_config = 'protocol.version=2'
160 git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
161 git_cmd('checkout', 'FETCH_HEAD')
162 git_cmd(
163 '-c', git_config, 'submodule', 'update', '--init', '--recursive',
164 '--depth=1',
165 )
166
167 def clone(self, repo: str, ref: str, deps: Sequence[str] = ()) -> str:
168 """Clone the given url and checkout the specific ref."""
169
170 def clone_strategy(directory: str) -> None:
171 git.init_repo(directory, repo)
172 env = git.no_git_env()
173
174 def _git_cmd(*args: str) -> None:
175 cmd_output_b('git', *args, cwd=directory, env=env)
176
177 try:
178 self._shallow_clone(ref, _git_cmd)
179 except CalledProcessError:
180 self._complete_clone(ref, _git_cmd)
181
182 return self._new_repo(repo, ref, deps, clone_strategy)
183
184 LOCAL_RESOURCES = (
185 'Cargo.toml', 'main.go', 'go.mod', 'main.rs', '.npmignore',
186 'package.json', 'pre_commit_dummy_package.gemspec', 'setup.py',
187 'environment.yml', 'Makefile.PL',
188 )
189
190 def make_local(self, deps: Sequence[str]) -> str:
191 def make_local_strategy(directory: str) -> None:
192 for resource in self.LOCAL_RESOURCES:
193 contents = resource_text(f'empty_template_{resource}')
194 with open(os.path.join(directory, resource), 'w') as f:
195 f.write(contents)
196
197 env = git.no_git_env()
198
199 # initialize the git repository so it looks more like cloned repos
200 def _git_cmd(*args: str) -> None:
201 cmd_output_b('git', *args, cwd=directory, env=env)
202
203 git.init_repo(directory, '<<unknown>>')
204 _git_cmd('add', '.')
205 git.commit(repo=directory)
206
207 return self._new_repo(
208 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
209 )
210
211 def _create_config_table(self, db: sqlite3.Connection) -> None:
212 db.executescript(
213 'CREATE TABLE IF NOT EXISTS configs ('
214 ' path TEXT NOT NULL,'
215 ' PRIMARY KEY (path)'
216 ');',
217 )
218
219 def mark_config_used(self, path: str) -> None:
220 path = os.path.realpath(path)
221 # don't insert config files that do not exist
222 if not os.path.exists(path):
223 return
224 with self.connect() as db:
225 # TODO: eventually remove this and only create in _create
226 self._create_config_table(db)
227 db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))
228
229 def select_all_configs(self) -> List[str]:
230 with self.connect() as db:
231 self._create_config_table(db)
232 rows = db.execute('SELECT path FROM configs').fetchall()
233 return [path for path, in rows]
234
235 def delete_configs(self, configs: List[str]) -> None:
236 with self.connect() as db:
237 rows = [(path,) for path in configs]
238 db.executemany('DELETE FROM configs WHERE path = ?', rows)
239
240 def select_all_repos(self) -> List[Tuple[str, str, str]]:
241 with self.connect() as db:
242 return db.execute('SELECT repo, ref, path from repos').fetchall()
243
244 def delete_repo(self, db_repo_name: str, ref: str, path: str) -> None:
245 with self.connect() as db:
246 db.execute(
247 'DELETE FROM repos WHERE repo = ? and ref = ?',
248 (db_repo_name, ref),
249 )
250 rmtree(path)
251
[end of pre_commit/store.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/store.py b/pre_commit/store.py
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -30,10 +30,11 @@
`Store.get_default_directory` can be mocked in tests and
`_get_default_directory` can be tested.
"""
- return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
+ ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join(
os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
'pre-commit',
)
+ return os.path.realpath(ret)
class Store:
| {"golden_diff": "diff --git a/pre_commit/store.py b/pre_commit/store.py\n--- a/pre_commit/store.py\n+++ b/pre_commit/store.py\n@@ -30,10 +30,11 @@\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n- return os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n+ ret = os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n 'pre-commit',\n )\n+ return os.path.realpath(ret)\n \n \n class Store:\n", "issue": "hook using node doesn't work on FreeBSD\nWe have a hook using doctoc, the node package. I cannot install the hook, this is the output:\r\n\r\n```\r\ndesktop1% pre-commit install --install-hooks \r\npre-commit installed at .git/hooks/pre-commit\r\n[INFO] Initializing environment for [email protected]:jockej/pre-commit-hooks.git.\r\n[INFO] Initializing environment for https://github.com/thlorenz/doctoc.\r\n[INFO] Initializing environment for https://gitlab.com/pycqa/flake8.\r\n[INFO] Initializing environment for https://github.com/jorisroovers/gitlint.\r\n[INFO] Installing environment for https://github.com/thlorenz/doctoc.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/bin/node', '/usr/local/bin/npm', 'install', '-g', '.')\r\nreturn code: 254\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n npm ERR! code ENOENT\r\n npm ERR! syscall chmod\r\n npm ERR! path /home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js\r\n npm ERR! errno -2\r\n npm ERR! enoent ENOENT: no such file or directory, chmod '/home/joakim/.cache/pre-commit/repoizbdwm4g/node_env-system/lib/node_modules/doctoc/doctoc.js'\r\n npm ERR! enoent This is related to npm not being able to find a file.\r\n npm ERR! enoent \r\n \r\n npm ERR! A complete log of this run can be found in:\r\n npm ERR! /home/joakim/.npm/_logs/2020-05-07T10_30_16_218Z-debug.log\r\n \r\nCheck the log at /home/joakim/.cache/pre-commit/pre-commit.log\r\n```\r\nThese hooks all install and run perfectly on GNU/Linux.\r\n\r\nI believe it happens because pre-commit tries to use the --prebuilt flag to nodeenv, however this doesn't work for FreeBSD since there are no prebuilt packages (the URL constructed gives a 404). I have node installed so it should be possible to use --system, but I haven't been able to make it work myself.\r\n\r\nNodeenv itself seems to work fine with --system.\r\n\r\nPlease let me know if I can help with trying things on FreeBSD.\r\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os.path\nimport sqlite3\nimport tempfile\nfrom typing import Callable\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import file_lock\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import resource_text\nfrom pre_commit.util import rmtree\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_default_directory() -> str:\n \"\"\"Returns the default directory for the Store. This is intentionally\n underscored to indicate that `Store.get_default_directory` is the intended\n way to get this information. 
This is also done so\n `Store.get_default_directory` can be mocked in tests and\n `_get_default_directory` can be tested.\n \"\"\"\n return os.environ.get('PRE_COMMIT_HOME') or os.path.join(\n os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),\n 'pre-commit',\n )\n\n\nclass Store:\n get_default_directory = staticmethod(_get_default_directory)\n\n def __init__(self, directory: Optional[str] = None) -> None:\n self.directory = directory or Store.get_default_directory()\n self.db_path = os.path.join(self.directory, 'db.db')\n\n if not os.path.exists(self.directory):\n os.makedirs(self.directory, exist_ok=True)\n with open(os.path.join(self.directory, 'README'), 'w') as f:\n f.write(\n 'This directory is maintained by the pre-commit project.\\n'\n 'Learn more: https://github.com/pre-commit/pre-commit\\n',\n )\n\n if os.path.exists(self.db_path):\n return\n with self.exclusive_lock():\n # Another process may have already completed this work\n if os.path.exists(self.db_path): # pragma: no cover (race)\n return\n # To avoid a race where someone ^Cs between db creation and\n # execution of the CREATE TABLE statement\n fd, tmpfile = tempfile.mkstemp(dir=self.directory)\n # We'll be managing this file ourselves\n os.close(fd)\n with self.connect(db_path=tmpfile) as db:\n db.executescript(\n 'CREATE TABLE repos ('\n ' repo TEXT NOT NULL,'\n ' ref TEXT NOT NULL,'\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (repo, ref)'\n ');',\n )\n self._create_config_table(db)\n\n # Atomic file move\n os.rename(tmpfile, self.db_path)\n\n @contextlib.contextmanager\n def exclusive_lock(self) -> Generator[None, None, None]:\n def blocked_cb() -> None: # pragma: no cover (tests are in-process)\n logger.info('Locking pre-commit directory')\n\n with file_lock.lock(os.path.join(self.directory, '.lock'), blocked_cb):\n yield\n\n @contextlib.contextmanager\n def connect(\n self,\n db_path: Optional[str] = None,\n ) -> Generator[sqlite3.Connection, None, None]:\n db_path = db_path or self.db_path\n # sqlite doesn't close its fd with its contextmanager >.<\n # contextlib.closing fixes this.\n # See: https://stackoverflow.com/a/28032829/812183\n with contextlib.closing(sqlite3.connect(db_path)) as db:\n # this creates a transaction\n with db:\n yield db\n\n @classmethod\n def db_repo_name(cls, repo: str, deps: Sequence[str]) -> str:\n if deps:\n return f'{repo}:{\",\".join(sorted(deps))}'\n else:\n return repo\n\n def _new_repo(\n self,\n repo: str,\n ref: str,\n deps: Sequence[str],\n make_strategy: Callable[[str], None],\n ) -> str:\n repo = self.db_repo_name(repo, deps)\n\n def _get_result() -> Optional[str]:\n # Check if we already exist\n with self.connect() as db:\n result = db.execute(\n 'SELECT path FROM repos WHERE repo = ? 
AND ref = ?',\n (repo, ref),\n ).fetchone()\n return result[0] if result else None\n\n result = _get_result()\n if result:\n return result\n with self.exclusive_lock():\n # Another process may have already completed this work\n result = _get_result()\n if result: # pragma: no cover (race)\n return result\n\n logger.info(f'Initializing environment for {repo}.')\n\n directory = tempfile.mkdtemp(prefix='repo', dir=self.directory)\n with clean_path_on_failure(directory):\n make_strategy(directory)\n\n # Update our db with the created repo\n with self.connect() as db:\n db.execute(\n 'INSERT INTO repos (repo, ref, path) VALUES (?, ?, ?)',\n [repo, ref, directory],\n )\n return directory\n\n def _complete_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a complete clone of a repository and its submodules \"\"\"\n\n git_cmd('fetch', 'origin', '--tags')\n git_cmd('checkout', ref)\n git_cmd('submodule', 'update', '--init', '--recursive')\n\n def _shallow_clone(self, ref: str, git_cmd: Callable[..., None]) -> None:\n \"\"\"Perform a shallow clone of a repository and its submodules \"\"\"\n\n git_config = 'protocol.version=2'\n git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')\n git_cmd('checkout', 'FETCH_HEAD')\n git_cmd(\n '-c', git_config, 'submodule', 'update', '--init', '--recursive',\n '--depth=1',\n )\n\n def clone(self, repo: str, ref: str, deps: Sequence[str] = ()) -> str:\n \"\"\"Clone the given url and checkout the specific ref.\"\"\"\n\n def clone_strategy(directory: str) -> None:\n git.init_repo(directory, repo)\n env = git.no_git_env()\n\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n try:\n self._shallow_clone(ref, _git_cmd)\n except CalledProcessError:\n self._complete_clone(ref, _git_cmd)\n\n return self._new_repo(repo, ref, deps, clone_strategy)\n\n LOCAL_RESOURCES = (\n 'Cargo.toml', 'main.go', 'go.mod', 'main.rs', '.npmignore',\n 'package.json', 'pre_commit_dummy_package.gemspec', 'setup.py',\n 'environment.yml', 'Makefile.PL',\n )\n\n def make_local(self, deps: Sequence[str]) -> str:\n def make_local_strategy(directory: str) -> None:\n for resource in self.LOCAL_RESOURCES:\n contents = resource_text(f'empty_template_{resource}')\n with open(os.path.join(directory, resource), 'w') as f:\n f.write(contents)\n\n env = git.no_git_env()\n\n # initialize the git repository so it looks more like cloned repos\n def _git_cmd(*args: str) -> None:\n cmd_output_b('git', *args, cwd=directory, env=env)\n\n git.init_repo(directory, '<<unknown>>')\n _git_cmd('add', '.')\n git.commit(repo=directory)\n\n return self._new_repo(\n 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,\n )\n\n def _create_config_table(self, db: sqlite3.Connection) -> None:\n db.executescript(\n 'CREATE TABLE IF NOT EXISTS configs ('\n ' path TEXT NOT NULL,'\n ' PRIMARY KEY (path)'\n ');',\n )\n\n def mark_config_used(self, path: str) -> None:\n path = os.path.realpath(path)\n # don't insert config files that do not exist\n if not os.path.exists(path):\n return\n with self.connect() as db:\n # TODO: eventually remove this and only create in _create\n self._create_config_table(db)\n db.execute('INSERT OR IGNORE INTO configs VALUES (?)', (path,))\n\n def select_all_configs(self) -> List[str]:\n with self.connect() as db:\n self._create_config_table(db)\n rows = db.execute('SELECT path FROM configs').fetchall()\n return [path for path, in rows]\n\n def delete_configs(self, configs: List[str]) -> None:\n with self.connect() as db:\n 
rows = [(path,) for path in configs]\n db.executemany('DELETE FROM configs WHERE path = ?', rows)\n\n def select_all_repos(self) -> List[Tuple[str, str, str]]:\n with self.connect() as db:\n return db.execute('SELECT repo, ref, path from repos').fetchall()\n\n def delete_repo(self, db_repo_name: str, ref: str, path: str) -> None:\n with self.connect() as db:\n db.execute(\n 'DELETE FROM repos WHERE repo = ? and ref = ?',\n (db_repo_name, ref),\n )\n rmtree(path)\n", "path": "pre_commit/store.py"}]} | 3,772 | 139 |
gh_patches_debug_5247 | rasdani/github-patches | git_diff | pytorch__ignite-1978 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve Accumulation metric on tensors to work with Apex
## 🚀 Feature Request
It seems that Ignite is not compatible with Apex. When I try to use automatic mixed precision (amp) from Apex for GPU acceleration, I encounter the following error. Looking around the Ignite source code, I find that it uses the original torch.Tensor. Is there any timeline for Ignite to become compatible with Apex?
```
m.update(loss)
--
File "/usr/local/lib/python3.6/site-packages/ignite/metrics/metric.py", line 575, in wrapper
func(self, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py", line 73, in update
self.accumulator = self._op(self.accumulator, output)
File "/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py", line 130, in _mean_op
return a + x
File "/usr/local/lib64/python3.6/site-packages/apex/amp/wrap.py", line 62, in wrapper
.format(types))
NotImplementedError: Do not know how to handle these types to promote: {'FloatTensor', 'DoubleTensor'}
```
Apex: https://github.com/NVIDIA/apex
## Environment
- PyTorch Version (1.4.0):
- Ignite Version (0.4.4):
- OS (Linux):
- Python version: python 3.6
- Apex Version (0.1)
</issue>
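The traceback points at a dtype mismatch rather than a device mismatch: the accumulator is created as float64 in `reset()`, an amp-produced loss is float32, and Apex's wrapped operators refuse to promote `FloatTensor` with `DoubleTensor`. A small standalone sketch of the failure mode and of the cast that the eventual patch applies — hypothetical code, not the library itself:

```python
# Hypothetical sketch: align the incoming value with the accumulator's dtype
# and device in one call, so no implicit type promotion is ever needed.
import torch

accumulator = torch.tensor(0.0, dtype=torch.float64)   # as in reset()
loss = torch.tensor(1.23, dtype=torch.float32)          # typical amp output

aligned = loss.detach().to(accumulator)   # matches dtype *and* device
accumulator = accumulator + aligned
print(accumulator.dtype)                  # torch.float64
```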
<code>
[start of ignite/metrics/accumulation.py]
1 import numbers
2 from typing import Callable, Tuple, Union
3
4 import torch
5
6 from ignite.exceptions import NotComputableError
7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
8
9 __all__ = ["VariableAccumulation", "GeometricAverage", "Average"]
10
11
12 class VariableAccumulation(Metric):
13 """Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.
14
15 - ``update`` must receive output of the form `x`.
16 - `x` can be a number or `torch.Tensor`.
17
18 Note:
19
20 The class stores input into two public variables: `accumulator` and `num_examples`.
21 Number of samples is updated following the rule:
22
23 - `+1` if input is a number
24 - `+1` if input is a 1D `torch.Tensor`
25 - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
26
27 Args:
28 op: a callable to update accumulator. Method's signature is `(accumulator, output)`.
29 For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.
30 output_transform: a callable that is used to transform the
31 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
32 form expected by the metric. This can be useful if, for example, you have a multi-output model and
33 you want to compute the metric with respect to one of the outputs.
34 device: specifies which device updates are accumulated on. Setting the metric's
35 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
36 default, CPU.
37
38 """
39
40 required_output_keys = None
41
42 def __init__(
43 self,
44 op: Callable,
45 output_transform: Callable = lambda x: x,
46 device: Union[str, torch.device] = torch.device("cpu"),
47 ):
48 if not callable(op):
49 raise TypeError(f"Argument op should be a callable, but given {type(op)}")
50
51 self._op = op
52
53 super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)
54
55 @reinit__is_reduced
56 def reset(self) -> None:
57 self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)
58 self.num_examples = 0
59
60 def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:
61 if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)):
62 raise TypeError(f"Output should be a number or torch.Tensor, but given {type(output)}")
63
64 @reinit__is_reduced
65 def update(self, output: Union[float, torch.Tensor]) -> None:
66 self._check_output_type(output)
67
68 if isinstance(output, torch.Tensor):
69 output = output.detach()
70 if output.device != self._device:
71 output = output.to(self._device)
72
73 self.accumulator = self._op(self.accumulator, output)
74
75 if isinstance(output, torch.Tensor):
76 self.num_examples += output.shape[0] if len(output.shape) > 1 else 1
77 else:
78 self.num_examples += 1
79
80 @sync_all_reduce("accumulator", "num_examples")
81 def compute(self) -> Tuple[torch.Tensor, int]:
82 return self.accumulator, self.num_examples
83
84
85 class Average(VariableAccumulation):
86 """Helper class to compute arithmetic average of a single variable.
87
88 - ``update`` must receive output of the form `x`.
89 - `x` can be a number or `torch.Tensor`.
90
91 Note:
92
93 Number of samples is updated following the rule:
94
95 - `+1` if input is a number
96 - `+1` if input is a 1D `torch.Tensor`
97 - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
98
99 For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
100 is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`
101
102 Examples:
103
104 .. code-block:: python
105
106 evaluator = ...
107
108 custom_var_mean = Average(output_transform=lambda output: output['custom_var'])
109 custom_var_mean.attach(evaluator, 'mean_custom_var')
110
111 state = evaluator.run(dataset)
112 # state.metrics['mean_custom_var'] -> average of output['custom_var']
113
114 Args:
115 output_transform: a callable that is used to transform the
116 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
117 form expected by the metric. This can be useful if, for example, you have a multi-output model and
118 you want to compute the metric with respect to one of the outputs.
119 device: specifies which device updates are accumulated on. Setting the metric's
120 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
121 default, CPU.
122 """
123
124 def __init__(
125 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
126 ):
127 def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:
128 if isinstance(x, torch.Tensor) and x.ndim > 1:
129 x = x.sum(dim=0)
130 return a + x
131
132 super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)
133
134 @sync_all_reduce("accumulator", "num_examples")
135 def compute(self) -> Union[float, torch.Tensor]:
136 if self.num_examples < 1:
137 raise NotComputableError(
138 f"{self.__class__.__name__} must have at least one example before it can be computed."
139 )
140
141 return self.accumulator / self.num_examples
142
143
144 class GeometricAverage(VariableAccumulation):
145 """Helper class to compute geometric average of a single variable.
146
147 - ``update`` must receive output of the form `x`.
148 - `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.
149
150 Note:
151
152 Number of samples is updated following the rule:
153
154 - `+1` if input is a number
155 - `+1` if input is a 1D `torch.Tensor`
156 - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
157
158 For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
159 is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`
160
161 Args:
162 output_transform: a callable that is used to transform the
163 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
164 form expected by the metric. This can be useful if, for example, you have a multi-output model and
165 you want to compute the metric with respect to one of the outputs.
166 device: specifies which device updates are accumulated on. Setting the metric's
167 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
168 default, CPU.
169
170 """
171
172 def __init__(
173 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
174 ):
175 def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:
176 if not isinstance(x, torch.Tensor):
177 x = torch.tensor(x)
178 x = torch.log(x)
179 if x.ndim > 1:
180 x = x.sum(dim=0)
181 return a + x
182
183 super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)
184
185 @sync_all_reduce("accumulator", "num_examples")
186 def compute(self) -> Union[float, torch.Tensor]:
187 if self.num_examples < 1:
188 raise NotComputableError(
189 f"{self.__class__.__name__} must have at least one example before it can be computed."
190 )
191
192 tensor = torch.exp(self.accumulator / self.num_examples)
193
194 if tensor.numel() == 1:
195 return tensor.item()
196
197 return tensor
198
[end of ignite/metrics/accumulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py
--- a/ignite/metrics/accumulation.py
+++ b/ignite/metrics/accumulation.py
@@ -67,8 +67,8 @@
if isinstance(output, torch.Tensor):
output = output.detach()
- if output.device != self._device:
- output = output.to(self._device)
+ if not (output.device == self._device and output.dtype == self.accumulator.dtype):
+ output = output.to(self.accumulator)
self.accumulator = self._op(self.accumulator, output)
| {"golden_diff": "diff --git a/ignite/metrics/accumulation.py b/ignite/metrics/accumulation.py\n--- a/ignite/metrics/accumulation.py\n+++ b/ignite/metrics/accumulation.py\n@@ -67,8 +67,8 @@\n \n if isinstance(output, torch.Tensor):\n output = output.detach()\n- if output.device != self._device:\n- output = output.to(self._device)\n+ if not (output.device == self._device and output.dtype == self.accumulator.dtype):\n+ output = output.to(self.accumulator)\n \n self.accumulator = self._op(self.accumulator, output)\n", "issue": "Improve Accumulation metric on tensors to work with Apex\n## \ud83d\ude80 Feature Request\r\nIt seems that the ignite is not compatible with apex. When i am trying to use the (automatic mixed precision)amp from the Apex for the GPU Acceleration, I have encountered the following error. Look around the Ignite source code and I find that it uses the original torch.Tensor. Is there any timeline for Ignite to be compatible with Apex?\r\n```\r\nm.update(loss)\r\n--\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/metric.py\", line 575, in wrapper\r\nfunc(self, *args, **kwargs)\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py\", line 73, in update\r\nself.accumulator = self._op(self.accumulator, output)\r\nFile \"/usr/local/lib/python3.6/site-packages/ignite/metrics/accumulation.py\", line 130, in _mean_op\r\nreturn a + x\r\nFile \"/usr/local/lib64/python3.6/site-packages/apex/amp/wrap.py\", line 62, in wrapper\r\n.format(types))\r\nNotImplementedError: Do not know how to handle these types to promote: {'FloatTensor', 'DoubleTensor'}\r\n```\r\nApex: https://github.com/NVIDIA/apex\r\n\r\n## Environment\r\n\r\n- PyTorch Version (1.4.0):\r\n- Ignite Version (0.4.4):\r\n- OS (Linux):\r\n- Python version: python 3.6\r\n- Apex Version (0.1)\n", "before_files": [{"content": "import numbers\nfrom typing import Callable, Tuple, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"VariableAccumulation\", \"GeometricAverage\", \"Average\"]\n\n\nclass VariableAccumulation(Metric):\n \"\"\"Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n The class stores input into two public variables: `accumulator` and `num_examples`.\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n Args:\n op: a callable to update accumulator. Method's signature is `(accumulator, output)`.\n For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n op: Callable,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if not callable(op):\n raise TypeError(f\"Argument op should be a callable, but given {type(op)}\")\n\n self._op = op\n\n super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self.num_examples = 0\n\n def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:\n if not (isinstance(output, numbers.Number) or isinstance(output, torch.Tensor)):\n raise TypeError(f\"Output should be a number or torch.Tensor, but given {type(output)}\")\n\n @reinit__is_reduced\n def update(self, output: Union[float, torch.Tensor]) -> None:\n self._check_output_type(output)\n\n if isinstance(output, torch.Tensor):\n output = output.detach()\n if output.device != self._device:\n output = output.to(self._device)\n\n self.accumulator = self._op(self.accumulator, output)\n\n if isinstance(output, torch.Tensor):\n self.num_examples += output.shape[0] if len(output.shape) > 1 else 1\n else:\n self.num_examples += 1\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Tuple[torch.Tensor, int]:\n return self.accumulator, self.num_examples\n\n\nclass Average(VariableAccumulation):\n \"\"\"Helper class to compute arithmetic average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a number or `torch.Tensor`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`\n\n Examples:\n\n .. code-block:: python\n\n evaluator = ...\n\n custom_var_mean = Average(output_transform=lambda output: output['custom_var'])\n custom_var_mean.attach(evaluator, 'mean_custom_var')\n\n state = evaluator.run(dataset)\n # state.metrics['mean_custom_var'] -> average of output['custom_var']\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:\n if isinstance(x, torch.Tensor) and x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n return self.accumulator / self.num_examples\n\n\nclass GeometricAverage(VariableAccumulation):\n \"\"\"Helper class to compute geometric average of a single variable.\n\n - ``update`` must receive output of the form `x`.\n - `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.\n\n Note:\n\n Number of samples is updated following the rule:\n\n - `+1` if input is a number\n - `+1` if input is a 1D `torch.Tensor`\n - `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).\n\n For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and\n is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x)\n x = torch.log(x)\n if x.ndim > 1:\n x = x.sum(dim=0)\n return a + x\n\n super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)\n\n @sync_all_reduce(\"accumulator\", \"num_examples\")\n def compute(self) -> Union[float, torch.Tensor]:\n if self.num_examples < 1:\n raise NotComputableError(\n f\"{self.__class__.__name__} must have at least one example before it can be computed.\"\n )\n\n tensor = torch.exp(self.accumulator / self.num_examples)\n\n if tensor.numel() == 1:\n return tensor.item()\n\n return tensor\n", "path": "ignite/metrics/accumulation.py"}]} | 3,223 | 136 |
gh_patches_debug_33990 | rasdani/github-patches | git_diff | netbox-community__netbox-16049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error on cable edit after B port was deleted / wrong status
### Deployment Type
Self-hosted (Docker)
### NetBox Version
v3.7.5
### Python Version
3.11
### Steps to Reproduce
1. Have a device with front- and rear-Ports (Patch Panel)
2. Have a device with interfaces (switches)
3. Connect switch interface (here gi43) with front-port (here 22)
4. Delete rear ports on patch panel device
5. Go To Connections > Cables
6. Click on edit of cable --> error message
### Expected Behavior
- Edit Button works to connect cable again
### Observed Behavior
- Error Message
[screenshot of the error message]
Cables/UI: Unable to change from front to rear while editing cable
### NetBox version
v3.5.6
### Feature type
Change to existing functionality
### Proposed functionality
Currently, if someone accidentally connects a cable to the rear port instead of the front port, the entire cable must be deleted and created again. It would be nice to be able to change not only the port number but also the location (front/rear) when editing the cable. This might just be a UI change, as the API seems to allow changing it.
### Use case
It is not necessary to delete the cable and enter all of its information (label, length, etc.) again; you can just reconnect it.
### Database changes
_No response_
### External dependencies
_No response_
</issue>
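Both reports revolve around the same mechanism in the form code below: `get_cable_form(a_type, b_type)` generates a separate form class for each pair of termination models, so the edit view must re-derive that pair from the cable being edited. A hypothetical sketch of such a resolution step (illustrative names, not NetBox's actual view code):

```python
# Hypothetical helper: derive each side's termination model from an existing
# cable, tolerating a side whose termination objects no longer exist.
def termination_types(cable):
    a_type = type(cable.a_terminations[0]) if cable.a_terminations else None
    b_type = type(cable.b_terminations[0]) if cable.b_terminations else None
    return a_type, b_type
```

If one side's ports have been deleted there is nothing left to derive that side's type from, which is consistent with the edit error described above; likewise, a front-to-rear swap changes the type pair and therefore the generated form class.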
<code>
[start of netbox/dcim/forms/connections.py]
1 from django import forms
2 from django.contrib.contenttypes.models import ContentType
3 from django.utils.translation import gettext_lazy as _
4
5 from circuits.models import Circuit, CircuitTermination
6 from dcim.models import *
7 from utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField
8 from .model_forms import CableForm
9
10
11 def get_cable_form(a_type, b_type):
12
13 class FormMetaclass(forms.models.ModelFormMetaclass):
14
15 def __new__(mcs, name, bases, attrs):
16
17 for cable_end, term_cls in (('a', a_type), ('b', b_type)):
18
19 # Device component
20 if hasattr(term_cls, 'device'):
21
22 attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(
23 queryset=Device.objects.all(),
24 label=_('Device'),
25 required=False,
26 selector=True,
27 initial_params={
28 f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'
29 }
30 )
31 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
32 queryset=term_cls.objects.all(),
33 label=term_cls._meta.verbose_name.title(),
34 context={
35 'disabled': '_occupied',
36 },
37 query_params={
38 'device_id': f'$termination_{cable_end}_device',
39 'kind': 'physical', # Exclude virtual interfaces
40 }
41 )
42
43 # PowerFeed
44 elif term_cls == PowerFeed:
45
46 attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(
47 queryset=PowerPanel.objects.all(),
48 label=_('Power Panel'),
49 required=False,
50 selector=True,
51 initial_params={
52 'powerfeeds__in': f'${cable_end}_terminations'
53 }
54 )
55 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
56 queryset=term_cls.objects.all(),
57 label=_('Power Feed'),
58 context={
59 'disabled': '_occupied',
60 },
61 query_params={
62 'power_panel_id': f'$termination_{cable_end}_powerpanel',
63 }
64 )
65
66 # CircuitTermination
67 elif term_cls == CircuitTermination:
68
69 attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(
70 queryset=Circuit.objects.all(),
71 label=_('Circuit'),
72 selector=True,
73 initial_params={
74 'terminations__in': f'${cable_end}_terminations'
75 }
76 )
77 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
78 queryset=term_cls.objects.all(),
79 label=_('Side'),
80 context={
81 'disabled': '_occupied',
82 },
83 query_params={
84 'circuit_id': f'$termination_{cable_end}_circuit',
85 }
86 )
87
88 return super().__new__(mcs, name, bases, attrs)
89
90 class _CableForm(CableForm, metaclass=FormMetaclass):
91
92 def __init__(self, *args, initial=None, **kwargs):
93
94 initial = initial or {}
95 if a_type:
96 ct = ContentType.objects.get_for_model(a_type)
97 initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'
98 if b_type:
99 ct = ContentType.objects.get_for_model(b_type)
100 initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'
101
102 # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()
103 for field_name in ('a_terminations', 'b_terminations'):
104 if field_name in initial and type(initial[field_name]) is not list:
105 initial[field_name] = [initial[field_name]]
106
107 super().__init__(*args, initial=initial, **kwargs)
108
109 if self.instance and self.instance.pk:
110 # Initialize A/B terminations when modifying an existing Cable instance
111 self.initial['a_terminations'] = self.instance.a_terminations
112 self.initial['b_terminations'] = self.instance.b_terminations
113
114 def clean(self):
115 super().clean()
116
117 # Set the A/B terminations on the Cable instance
118 self.instance.a_terminations = self.cleaned_data.get('a_terminations', [])
119 self.instance.b_terminations = self.cleaned_data.get('b_terminations', [])
120
121 return _CableForm
122
[end of netbox/dcim/forms/connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py
--- a/netbox/dcim/forms/connections.py
+++ b/netbox/dcim/forms/connections.py
@@ -90,14 +90,14 @@
class _CableForm(CableForm, metaclass=FormMetaclass):
def __init__(self, *args, initial=None, **kwargs):
-
initial = initial or {}
+
if a_type:
- ct = ContentType.objects.get_for_model(a_type)
- initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'
+ a_ct = ContentType.objects.get_for_model(a_type)
+ initial['a_terminations_type'] = f'{a_ct.app_label}.{a_ct.model}'
if b_type:
- ct = ContentType.objects.get_for_model(b_type)
- initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'
+ b_ct = ContentType.objects.get_for_model(b_type)
+ initial['b_terminations_type'] = f'{b_ct.app_label}.{b_ct.model}'
# TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()
for field_name in ('a_terminations', 'b_terminations'):
@@ -108,8 +108,17 @@
if self.instance and self.instance.pk:
# Initialize A/B terminations when modifying an existing Cable instance
- self.initial['a_terminations'] = self.instance.a_terminations
- self.initial['b_terminations'] = self.instance.b_terminations
+ if a_type and self.instance.a_terminations and a_ct == ContentType.objects.get_for_model(self.instance.a_terminations[0]):
+ self.initial['a_terminations'] = self.instance.a_terminations
+ if b_type and self.instance.b_terminations and b_ct == ContentType.objects.get_for_model(self.instance.b_terminations[0]):
+ self.initial['b_terminations'] = self.instance.b_terminations
+ else:
+ # Need to clear terminations if swapped type - but need to do it only
+ # if not from instance
+ if a_type:
+ initial.pop('a_terminations', None)
+ if b_type:
+ initial.pop('b_terminations', None)
def clean(self):
super().clean()
| {"golden_diff": "diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py\n--- a/netbox/dcim/forms/connections.py\n+++ b/netbox/dcim/forms/connections.py\n@@ -90,14 +90,14 @@\n class _CableForm(CableForm, metaclass=FormMetaclass):\n \n def __init__(self, *args, initial=None, **kwargs):\n-\n initial = initial or {}\n+\n if a_type:\n- ct = ContentType.objects.get_for_model(a_type)\n- initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'\n+ a_ct = ContentType.objects.get_for_model(a_type)\n+ initial['a_terminations_type'] = f'{a_ct.app_label}.{a_ct.model}'\n if b_type:\n- ct = ContentType.objects.get_for_model(b_type)\n- initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'\n+ b_ct = ContentType.objects.get_for_model(b_type)\n+ initial['b_terminations_type'] = f'{b_ct.app_label}.{b_ct.model}'\n \n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n@@ -108,8 +108,17 @@\n \n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n- self.initial['a_terminations'] = self.instance.a_terminations\n- self.initial['b_terminations'] = self.instance.b_terminations\n+ if a_type and self.instance.a_terminations and a_ct == ContentType.objects.get_for_model(self.instance.a_terminations[0]):\n+ self.initial['a_terminations'] = self.instance.a_terminations\n+ if b_type and self.instance.b_terminations and b_ct == ContentType.objects.get_for_model(self.instance.b_terminations[0]):\n+ self.initial['b_terminations'] = self.instance.b_terminations\n+ else:\n+ # Need to clear terminations if swapped type - but need to do it only\n+ # if not from instance\n+ if a_type:\n+ initial.pop('a_terminations', None)\n+ if b_type:\n+ initial.pop('b_terminations', None)\n \n def clean(self):\n super().clean()\n", "issue": "Error on cable edit after B port was deleted / wrong status\n### Deployment Type\r\n\r\nSelf-hosted (Docker)\r\n\r\n### NetBox Version\r\n\r\nv3.7.5\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Have a device with front- and rear-Ports (Patch Panel)\r\n2. Have a device with interfaces (switches)\r\n3. Connect switch interface (here gi43) with front-port (here 22)\r\n4. Delete rear ports on patch panel device\r\n5. Go To Connections > Cables\r\n6. Click on edit of cable --> error message\r\n\r\n### Expected Behavior\r\n\r\n- Edit Button works to connect cable again\r\n\r\n### Observed Behavior\r\n\r\n- Error Message \r\n\r\n\nCables/UI: Unable to change from front to rear while editing cable\n### NetBox version\n\nv3.5.6\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nCurrently, if someone accidentally connects a cable to the rear port instead of the front port, the entire cable must be deleted and created again. It would be nice to be able to change not only the port number but also the location (front/rear) when editing the cable. This might just be an ui change as the api seems to allow to change it.\n\n### Use case\n\nIt is not necessary to delete the cable and enter all information (label, length etc) again. 
You can just reconnect it.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext_lazy as _\n\nfrom circuits.models import Circuit, CircuitTermination\nfrom dcim.models import *\nfrom utilities.forms.fields import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .model_forms import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label=_('Device'),\n required=False,\n selector=True,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n 'kind': 'physical', # Exclude virtual interfaces\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label=_('Power Panel'),\n required=False,\n selector=True,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Power Feed'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'power_panel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label=_('Circuit'),\n selector=True,\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=_('Side'),\n context={\n 'disabled': '_occupied',\n },\n query_params={\n 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, initial=None, **kwargs):\n\n initial = initial or {}\n if a_type:\n ct = ContentType.objects.get_for_model(a_type)\n initial['a_terminations_type'] = f'{ct.app_label}.{ct.model}'\n if b_type:\n ct = ContentType.objects.get_for_model(b_type)\n initial['b_terminations_type'] = f'{ct.app_label}.{ct.model}'\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in initial and type(initial[field_name]) is not list:\n initial[field_name] = [initial[field_name]]\n\n super().__init__(*args, initial=initial, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n 
self.instance.a_terminations = self.cleaned_data.get('a_terminations', [])\n self.instance.b_terminations = self.cleaned_data.get('b_terminations', [])\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}]} | 2,125 | 529 |
gh_patches_debug_35052 | rasdani/github-patches | git_diff | pypa__virtualenv-1805 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
activate_this.py failed for python2 virtualenvs
**Issue**
It seems that pipenv recently introduced a new type of activate_this.py. On Windows the content of activate_this.py looks something like this:
```
prev_length = len(sys.path)
for lib in "'..\\Lib\\site-packages".split(os.pathsep):
path = os.path.realpath(os.path.join(bin_dir, lib))
site.addsitedir(path.decode("utf-8") if "'yes" else path)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
```
As you can see, the "'..\\Lib\\site-packages" is obviously wrong.
**Environment**
Provide at least:
- OS: Windows 10
- ``pip list`` of the host python where ``virtualenv`` is installed:
```console
virtualenv 20.0.18
virtualenv-clone 0.5.4
```
**Output of the virtual environment creation**
As I'm using virtualenv through pipenv, I failed to grab the virtualenv output
```
[ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms
creator CPython2Windows(dest=C:\Users\win10\.virtualenvs\win10-obmjl69F, clear=False, global=False)
seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\Users\win10\AppData\Local\pypa\virtualenv\seed-app-data\v1.0.1)
activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator
```
However, I've located the related code and wrote out its runtime variable information.
The following is the output of the `_repr_unicode` function in ```src/virtualenv/activation/python/__init__.py```:
```
'(win10)
'C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F
'win10-obmjl69F
'Scripts
';
'..\\Lib\\site-packages
'yes
```
As you can see, there's an additional ' before each item. I've done a small experiment on Python 3.6 and 3.7:
```
>>> value = "..\\123456"
>>> repr(value.encode("utf-8"))
"b'..\\\\123456'"
>>> repr(value.encode("utf-8"))[1:-1]
"'..\\\\123456"
>>>
```
I believe there's something wrong with this function. This function was introduced in PR #1503.
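For reference, here is a minimal sketch of the difference (the helper name `strip_repr_quotes` is only illustrative; the real code lives in `PythonActivator._repr_unicode`): on Python 2 the `repr()` of the encoded value starts with a single quote, while on Python 3 it starts with `b'`, so cutting off just one leading character leaves the stray quote behind.

```python
import sys

def strip_repr_quotes(value):
    # repr(value.encode("utf-8")) is "'...'" on Python 2 but "b'...'" on Python 3,
    # so one extra leading character has to be sliced off on Python 3.
    start = 2 if sys.version_info[0] == 3 else 1
    return repr(value.encode("utf-8"))[start:-1]

print(strip_repr_quotes("..\\Lib\\site-packages"))  # ..\\Lib\\site-packages, no stray quote
```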
</issue>
<code>
[start of src/virtualenv/activation/python/__init__.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 from collections import OrderedDict
5
6 from virtualenv.util.path import Path
7 from virtualenv.util.six import ensure_text
8
9 from ..via_template import ViaTemplateActivator
10
11
12 class PythonActivator(ViaTemplateActivator):
13 def templates(self):
14 yield Path("activate_this.py")
15
16 def replacements(self, creator, dest_folder):
17 replacements = super(PythonActivator, self).replacements(creator, dest_folder)
18 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)
19 win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2
20 replacements.update(
21 {
22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())),
23 "__DECODE_PATH__": ("yes" if win_py2 else ""),
24 }
25 )
26 return replacements
27
28 @staticmethod
29 def _repr_unicode(creator, value):
30 py2 = creator.interpreter.version_info.major == 2
31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
32 value = ensure_text(repr(value.encode("utf-8"))[1:-1])
33 return value
34
[end of src/virtualenv/activation/python/__init__.py]
[start of src/virtualenv/activation/via_template.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import sys
5 from abc import ABCMeta, abstractmethod
6
7 from six import add_metaclass
8
9 from virtualenv.util.six import ensure_text
10
11 from .activator import Activator
12
13 if sys.version_info >= (3, 7):
14 from importlib.resources import read_text
15 else:
16 from importlib_resources import read_text
17
18
19 @add_metaclass(ABCMeta)
20 class ViaTemplateActivator(Activator):
21 @abstractmethod
22 def templates(self):
23 raise NotImplementedError
24
25 def generate(self, creator):
26 dest_folder = creator.bin_dir
27 replacements = self.replacements(creator, dest_folder)
28 self._generate(replacements, self.templates(), dest_folder, creator)
29 if self.flag_prompt is not None:
30 creator.pyenv_cfg["prompt"] = self.flag_prompt
31
32 def replacements(self, creator, dest_folder):
33 return {
34 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
35 "__VIRTUAL_ENV__": ensure_text(str(creator.dest)),
36 "__VIRTUAL_NAME__": creator.env_name,
37 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
38 "__PATH_SEP__": ensure_text(os.pathsep),
39 }
40
41 def _generate(self, replacements, templates, to_folder, creator):
42 for template in templates:
43 text = self.instantiate_template(replacements, template, creator)
44 dest = to_folder / self.as_name(template)
45 dest.write_text(text, encoding="utf-8")
46
47 def as_name(self, template):
48 return template.name
49
50 def instantiate_template(self, replacements, template, creator):
51 # read text and do replacements
52 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict")
53 for key, value in replacements.items():
54 value = self._repr_unicode(creator, value)
55 text = text.replace(key, value)
56 return text
57
58 @staticmethod
59 def _repr_unicode(creator, value):
60 # by default we just let it be unicode
61 return value
62
[end of src/virtualenv/activation/via_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py
--- a/src/virtualenv/activation/python/__init__.py
+++ b/src/virtualenv/activation/python/__init__.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import, unicode_literals
import os
+import sys
from collections import OrderedDict
from virtualenv.util.path import Path
@@ -29,5 +30,6 @@
def _repr_unicode(creator, value):
py2 = creator.interpreter.version_info.major == 2
if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals
- value = ensure_text(repr(value.encode("utf-8"))[1:-1])
+ start = 2 if sys.version_info[0] == 3 else 1
+ value = ensure_text(repr(value.encode("utf-8"))[start:-1])
return value
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -25,9 +25,10 @@
def generate(self, creator):
dest_folder = creator.bin_dir
replacements = self.replacements(creator, dest_folder)
- self._generate(replacements, self.templates(), dest_folder, creator)
+ generated = self._generate(replacements, self.templates(), dest_folder, creator)
if self.flag_prompt is not None:
creator.pyenv_cfg["prompt"] = self.flag_prompt
+ return generated
def replacements(self, creator, dest_folder):
return {
@@ -39,10 +40,13 @@
}
def _generate(self, replacements, templates, to_folder, creator):
+ generated = []
for template in templates:
text = self.instantiate_template(replacements, template, creator)
dest = to_folder / self.as_name(template)
dest.write_text(text, encoding="utf-8")
+ generated.append(dest)
+ return generated
def as_name(self, template):
return template.name
| {"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import, unicode_literals\n \n import os\n+import sys\n from collections import OrderedDict\n \n from virtualenv.util.path import Path\n@@ -29,5 +30,6 @@\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n- value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n+ start = 2 if sys.version_info[0] == 3 else 1\n+ value = ensure_text(repr(value.encode(\"utf-8\"))[start:-1])\n return value\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -25,9 +25,10 @@\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n- self._generate(replacements, self.templates(), dest_folder, creator)\n+ generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n+ return generated\n \n def replacements(self, creator, dest_folder):\n return {\n@@ -39,10 +40,13 @@\n }\n \n def _generate(self, replacements, templates, to_folder, creator):\n+ generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n+ generated.append(dest)\n+ return generated\n \n def as_name(self, template):\n return template.name\n", "issue": "activate_this.py failed for python2 virtualenvs\n**Issue**\r\n\r\nIt seems recently pipenv introduced a new type of activate_this.py. 
On windows the content of activate_this.py has something like this:\r\n```\r\nprev_length = len(sys.path)\r\nfor lib in \"'..\\\\Lib\\\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"'yes\" else path)\r\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\r\n```\r\nAs you can see the \"'..\\\\Lib\\\\site-packages\" is obviously wrong.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Windows 10\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\n ```console\r\n virtualenv 20.0.18\r\n virtualenv-clone 0.5.4\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nAs I'm using virtualenv through pipenv, so I failed to grab the virtualenv output\r\n\r\n```\r\n[ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms\r\n\r\n creator CPython2Windows(dest=C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F, clear=False, global=False)\r\n\r\n seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\\Users\\win10\\AppData\\Local\\pypa\\virtualenv\\seed-app-data\\v1.0.1)\r\n\r\n activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator\r\n```\r\n\r\nHowever I've located the related code and wrote out its runtime variable information:\r\nThe following is the output of _repr_unicode function in ```src/virtualenv/activation/python/__init__.py```\r\n\r\n```\r\n'(win10) \r\n'C:\\\\Users\\\\win10\\\\.virtualenvs\\\\win10-obmjl69F\r\n'win10-obmjl69F\r\n'Scripts\r\n';\r\n'..\\\\Lib\\\\site-packages\r\n'yes\r\n```\r\nAs you can see, there's an additional ' before each item. I've done a small experiment on python 3.6 and 3.7:\r\n```\r\n>>> value = \"..\\\\123456\"\r\n>>> repr(value.encode(\"utf-8\"))\r\n\"b'..\\\\\\\\123456'\"\r\n>>> repr(value.encode(\"utf-8\"))[1:-1]\r\n\"'..\\\\\\\\123456\"\r\n>>>\r\n```\r\nI believe there's something wrong with this function. 
This function is introduced in PR #1503 \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]} | 2,087 | 495 |
gh_patches_debug_24382 | rasdani/github-patches | git_diff | beetbox__beets-4390 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Import error with reflink enabled and importadded plugin
<!--
Describe your problem, feature request, or discussion topic here.
Please fill out this and the "Setup" section below and remember to include
enough detail so that other people can reproduce the problem.
-->
### Problem
First of all, many thanks for adding the reflink option! It is a very promising feature, but in some cases it causes an error, e.g. with the ImportAdded plugin enabled.
Running this command in verbose (`-vv`) mode:
```sh
$ beet -vv import [absolutely any music catalogue]
```
Led to this problem:
```
[probably non-related output is redacted]
Sending event: album_imported
Traceback (most recent call last):
File "/usr/bin/beet", line 33, in <module>
sys.exit(load_entry_point('beets==1.6.0', 'console_scripts', 'beet')())
File "/usr/lib/python3.10/site-packages/beets/ui/__init__.py", line 1285, in main
_raw_main(args)
File "/usr/lib/python3.10/site-packages/beets/ui/__init__.py", line 1272, in _raw_main
subcommand.func(lib, suboptions, subargs)
File "/usr/lib/python3.10/site-packages/beets/ui/commands.py", line 973, in import_func
import_files(lib, paths, query)
File "/usr/lib/python3.10/site-packages/beets/ui/commands.py", line 943, in import_files
session.run()
File "/usr/lib/python3.10/site-packages/beets/importer.py", line 340, in run
pl.run_parallel(QUEUE_SIZE)
File "/usr/lib/python3.10/site-packages/beets/util/pipeline.py", line 446, in run_parallel
raise exc_info[1].with_traceback(exc_info[2])
File "/usr/lib/python3.10/site-packages/beets/util/pipeline.py", line 358, in run
self.coro.send(msg)
File "/usr/lib/python3.10/site-packages/beets/util/pipeline.py", line 170, in coro
task = func(*(args + (task,)))
File "/usr/lib/python3.10/site-packages/beets/importer.py", line 1573, in manipulate_files
task.finalize(session)
File "/usr/lib/python3.10/site-packages/beets/importer.py", line 612, in finalize
self._emit_imported(session.lib)
File "/usr/lib/python3.10/site-packages/beets/importer.py", line 638, in _emit_imported
plugins.send('album_imported', lib=lib, album=self.album)
File "/usr/lib/python3.10/site-packages/beets/plugins.py", line 488, in send
result = handler(**arguments)
File "/usr/lib/python3.10/site-packages/beets/plugins.py", line 145, in wrapper
return func(*args, **kwargs)
File "/usr/lib/python3.10/site-packages/beetsplug/importadded.py", line 105, in update_album_times
album.added = min(album_mtimes)
ValueError: min() arg is an empty sequence
```
### Setup
* OS: ArchLinux
* Python version: 3.10.5
* beets version: 1.6.0
* Turning off plugins made the problem go away (yes/no): yes, importadded plugin
<!--
You can turn off plugins temporarily by passing --plugins= on the command line:
$ beet --plugins= version
-->
My configuration (output of `beet config`) is:
```yaml
directory: /storage/music/show-reflink-bug
library: ~/.datasets/metadata/beets/show-reflink-bug.db.db
plugins: [importadded]
import:
reflink: yes
write: yes
importadded:
preserve_mtimes: no
preserve_write_mtimes: no
```
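
A minimal sketch of the crash itself (illustrative, not the plugin's actual code): with `reflink: yes` none of the copy/move/link/hardlink hooks the plugin registers fires for the reflinked files, so no mtime ends up stored for the items' new library paths and `album_mtimes` stays empty:

```python
album_mtimes = []   # no mtime was recorded for any reflinked item
min(album_mtimes)   # ValueError: min() arg is an empty sequence
```

Registering an `item_reflinked` listener (and treating `reflink` like the other non-in-place modes) would presumably avoid this.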
</issue>
<code>
[start of beetsplug/importadded.py]
1 """Populate an item's `added` and `mtime` fields by using the file
2 modification time (mtime) of the item's source file before import.
3
4 Reimported albums and items are skipped.
5 """
6
7 import os
8
9 from beets import util
10 from beets import importer
11 from beets.plugins import BeetsPlugin
12
13
14 class ImportAddedPlugin(BeetsPlugin):
15 def __init__(self):
16 super().__init__()
17 self.config.add({
18 'preserve_mtimes': False,
19 'preserve_write_mtimes': False,
20 })
21
22 # item.id for new items that were reimported
23 self.reimported_item_ids = None
24 # album.path for old albums that were replaced by a reimported album
25 self.replaced_album_paths = None
26 # item path in the library to the mtime of the source file
27 self.item_mtime = {}
28
29 register = self.register_listener
30 register('import_task_created', self.check_config)
31 register('import_task_created', self.record_if_inplace)
32 register('import_task_files', self.record_reimported)
33 register('before_item_moved', self.record_import_mtime)
34 register('item_copied', self.record_import_mtime)
35 register('item_linked', self.record_import_mtime)
36 register('item_hardlinked', self.record_import_mtime)
37 register('album_imported', self.update_album_times)
38 register('item_imported', self.update_item_times)
39 register('after_write', self.update_after_write_time)
40
41 def check_config(self, task, session):
42 self.config['preserve_mtimes'].get(bool)
43
44 def reimported_item(self, item):
45 return item.id in self.reimported_item_ids
46
47 def reimported_album(self, album):
48 return album.path in self.replaced_album_paths
49
50 def record_if_inplace(self, task, session):
51 if not (session.config['copy'] or session.config['move'] or
52 session.config['link'] or session.config['hardlink']):
53 self._log.debug("In place import detected, recording mtimes from "
54 "source paths")
55 items = [task.item] \
56 if isinstance(task, importer.SingletonImportTask) \
57 else task.items
58 for item in items:
59 self.record_import_mtime(item, item.path, item.path)
60
61 def record_reimported(self, task, session):
62 self.reimported_item_ids = {item.id for item, replaced_items
63 in task.replaced_items.items()
64 if replaced_items}
65 self.replaced_album_paths = set(task.replaced_albums.keys())
66
67 def write_file_mtime(self, path, mtime):
68 """Write the given mtime to the destination path.
69 """
70 stat = os.stat(util.syspath(path))
71 os.utime(util.syspath(path), (stat.st_atime, mtime))
72
73 def write_item_mtime(self, item, mtime):
74 """Write the given mtime to an item's `mtime` field and to the mtime
75 of the item's file.
76 """
77 # The file's mtime on disk must be in sync with the item's mtime
78 self.write_file_mtime(util.syspath(item.path), mtime)
79 item.mtime = mtime
80
81 def record_import_mtime(self, item, source, destination):
82 """Record the file mtime of an item's path before its import.
83 """
84 mtime = os.stat(util.syspath(source)).st_mtime
85 self.item_mtime[destination] = mtime
86 self._log.debug("Recorded mtime {0} for item '{1}' imported from "
87 "'{2}'", mtime, util.displayable_path(destination),
88 util.displayable_path(source))
89
90 def update_album_times(self, lib, album):
91 if self.reimported_album(album):
92 self._log.debug("Album '{0}' is reimported, skipping import of "
93 "added dates for the album and its items.",
94 util.displayable_path(album.path))
95 return
96
97 album_mtimes = []
98 for item in album.items():
99 mtime = self.item_mtime.pop(item.path, None)
100 if mtime:
101 album_mtimes.append(mtime)
102 if self.config['preserve_mtimes'].get(bool):
103 self.write_item_mtime(item, mtime)
104 item.store()
105 album.added = min(album_mtimes)
106 self._log.debug("Import of album '{0}', selected album.added={1} "
107 "from item file mtimes.", album.album, album.added)
108 album.store()
109
110 def update_item_times(self, lib, item):
111 if self.reimported_item(item):
112 self._log.debug("Item '{0}' is reimported, skipping import of "
113 "added date.", util.displayable_path(item.path))
114 return
115 mtime = self.item_mtime.pop(item.path, None)
116 if mtime:
117 item.added = mtime
118 if self.config['preserve_mtimes'].get(bool):
119 self.write_item_mtime(item, mtime)
120 self._log.debug("Import of item '{0}', selected item.added={1}",
121 util.displayable_path(item.path), item.added)
122 item.store()
123
124 def update_after_write_time(self, item, path):
125 """Update the mtime of the item's file with the item.added value
126 after each write of the item if `preserve_write_mtimes` is enabled.
127 """
128 if item.added:
129 if self.config['preserve_write_mtimes'].get(bool):
130 self.write_item_mtime(item, item.added)
131 self._log.debug("Write of item '{0}', selected item.added={1}",
132 util.displayable_path(item.path), item.added)
133
[end of beetsplug/importadded.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/importadded.py b/beetsplug/importadded.py
--- a/beetsplug/importadded.py
+++ b/beetsplug/importadded.py
@@ -34,6 +34,7 @@
register('item_copied', self.record_import_mtime)
register('item_linked', self.record_import_mtime)
register('item_hardlinked', self.record_import_mtime)
+ register('item_reflinked', self.record_import_mtime)
register('album_imported', self.update_album_times)
register('item_imported', self.update_item_times)
register('after_write', self.update_after_write_time)
@@ -49,7 +50,8 @@
def record_if_inplace(self, task, session):
if not (session.config['copy'] or session.config['move'] or
- session.config['link'] or session.config['hardlink']):
+ session.config['link'] or session.config['hardlink'] or
+ session.config['reflink']):
self._log.debug("In place import detected, recording mtimes from "
"source paths")
items = [task.item] \
| {"golden_diff": "diff --git a/beetsplug/importadded.py b/beetsplug/importadded.py\n--- a/beetsplug/importadded.py\n+++ b/beetsplug/importadded.py\n@@ -34,6 +34,7 @@\n register('item_copied', self.record_import_mtime)\n register('item_linked', self.record_import_mtime)\n register('item_hardlinked', self.record_import_mtime)\n+ register('item_reflinked', self.record_import_mtime)\n register('album_imported', self.update_album_times)\n register('item_imported', self.update_item_times)\n register('after_write', self.update_after_write_time)\n@@ -49,7 +50,8 @@\n \n def record_if_inplace(self, task, session):\n if not (session.config['copy'] or session.config['move'] or\n- session.config['link'] or session.config['hardlink']):\n+ session.config['link'] or session.config['hardlink'] or\n+ session.config['reflink']):\n self._log.debug(\"In place import detected, recording mtimes from \"\n \"source paths\")\n items = [task.item] \\\n", "issue": "Import error with reflink enabled and importadded plugin\n<!--\r\nDescribe your problem, feature request, or discussion topic here.\r\n\r\nPlease fill out this and the \"Setup\" section below and remember to include\r\nenough detail so that other people can reproduce the problem.\r\n-->\r\n\r\n### Problem\r\nFirst of all, many thanks for adding reflink option! Very promising feature. But in some cases it causes an error. E.g. with ImportAdded plugin enabled.\r\n\r\nRunning this command in verbose (`-vv`) mode:\r\n\r\n```sh\r\n$ beet -vv import [absolutely any music catalogue]\r\n```\r\n\r\nLed to this problem:\r\n\r\n```\r\n[probably non-related output is redacted]\r\n\r\nSending event: album_imported\r\nTraceback (most recent call last):\r\n File \"/usr/bin/beet\", line 33, in <module>\r\n sys.exit(load_entry_point('beets==1.6.0', 'console_scripts', 'beet')())\r\n File \"/usr/lib/python3.10/site-packages/beets/ui/__init__.py\", line 1285, in main\r\n _raw_main(args)\r\n File \"/usr/lib/python3.10/site-packages/beets/ui/__init__.py\", line 1272, in _raw_main\r\n subcommand.func(lib, suboptions, subargs)\r\n File \"/usr/lib/python3.10/site-packages/beets/ui/commands.py\", line 973, in import_func\r\n import_files(lib, paths, query)\r\n File \"/usr/lib/python3.10/site-packages/beets/ui/commands.py\", line 943, in import_files\r\n session.run()\r\n File \"/usr/lib/python3.10/site-packages/beets/importer.py\", line 340, in run\r\n pl.run_parallel(QUEUE_SIZE)\r\n File \"/usr/lib/python3.10/site-packages/beets/util/pipeline.py\", line 446, in run_parallel\r\n raise exc_info[1].with_traceback(exc_info[2])\r\n File \"/usr/lib/python3.10/site-packages/beets/util/pipeline.py\", line 358, in run\r\n self.coro.send(msg)\r\n File \"/usr/lib/python3.10/site-packages/beets/util/pipeline.py\", line 170, in coro\r\n task = func(*(args + (task,)))\r\n File \"/usr/lib/python3.10/site-packages/beets/importer.py\", line 1573, in manipulate_files\r\n task.finalize(session)\r\n File \"/usr/lib/python3.10/site-packages/beets/importer.py\", line 612, in finalize\r\n self._emit_imported(session.lib)\r\n File \"/usr/lib/python3.10/site-packages/beets/importer.py\", line 638, in _emit_imported\r\n plugins.send('album_imported', lib=lib, album=self.album)\r\n File \"/usr/lib/python3.10/site-packages/beets/plugins.py\", line 488, in send\r\n result = handler(**arguments)\r\n File \"/usr/lib/python3.10/site-packages/beets/plugins.py\", line 145, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/usr/lib/python3.10/site-packages/beetsplug/importadded.py\", line 105, in 
update_album_times\r\n album.added = min(album_mtimes)\r\nValueError: min() arg is an empty sequence\r\n\r\n```\r\n\r\n### Setup\r\n\r\n* OS: ArchLinux\r\n* Python version: 3.10.5\r\n* beets version: 1.6.0\r\n* Turning off plugins made problem go away (yes/no): yes, importadded plugin\r\n\r\n<!--\r\nYou can turn off plugins temporarily by passing --plugins= on the command line:\r\n\r\n$ beet --plugins= version\r\n-->\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\n```yaml\r\ndirectory: /storage/music/show-reflink-bug\r\nlibrary: ~/.datasets/metadata/beets/show-reflink-bug.db.db\r\n\r\nplugins: [importadded]\r\n\r\nimport:\r\n reflink: yes\r\n write: yes\r\nimportadded:\r\n preserve_mtimes: no\r\n preserve_write_mtimes: no\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Populate an item's `added` and `mtime` fields by using the file\nmodification time (mtime) of the item's source file before import.\n\nReimported albums and items are skipped.\n\"\"\"\n\nimport os\n\nfrom beets import util\nfrom beets import importer\nfrom beets.plugins import BeetsPlugin\n\n\nclass ImportAddedPlugin(BeetsPlugin):\n def __init__(self):\n super().__init__()\n self.config.add({\n 'preserve_mtimes': False,\n 'preserve_write_mtimes': False,\n })\n\n # item.id for new items that were reimported\n self.reimported_item_ids = None\n # album.path for old albums that were replaced by a reimported album\n self.replaced_album_paths = None\n # item path in the library to the mtime of the source file\n self.item_mtime = {}\n\n register = self.register_listener\n register('import_task_created', self.check_config)\n register('import_task_created', self.record_if_inplace)\n register('import_task_files', self.record_reimported)\n register('before_item_moved', self.record_import_mtime)\n register('item_copied', self.record_import_mtime)\n register('item_linked', self.record_import_mtime)\n register('item_hardlinked', self.record_import_mtime)\n register('album_imported', self.update_album_times)\n register('item_imported', self.update_item_times)\n register('after_write', self.update_after_write_time)\n\n def check_config(self, task, session):\n self.config['preserve_mtimes'].get(bool)\n\n def reimported_item(self, item):\n return item.id in self.reimported_item_ids\n\n def reimported_album(self, album):\n return album.path in self.replaced_album_paths\n\n def record_if_inplace(self, task, session):\n if not (session.config['copy'] or session.config['move'] or\n session.config['link'] or session.config['hardlink']):\n self._log.debug(\"In place import detected, recording mtimes from \"\n \"source paths\")\n items = [task.item] \\\n if isinstance(task, importer.SingletonImportTask) \\\n else task.items\n for item in items:\n self.record_import_mtime(item, item.path, item.path)\n\n def record_reimported(self, task, session):\n self.reimported_item_ids = {item.id for item, replaced_items\n in task.replaced_items.items()\n if replaced_items}\n self.replaced_album_paths = set(task.replaced_albums.keys())\n\n def write_file_mtime(self, path, mtime):\n \"\"\"Write the given mtime to the destination path.\n \"\"\"\n stat = os.stat(util.syspath(path))\n os.utime(util.syspath(path), (stat.st_atime, mtime))\n\n def write_item_mtime(self, item, mtime):\n \"\"\"Write the given mtime to an item's `mtime` field and to the mtime\n of the item's file.\n \"\"\"\n # The file's mtime on disk must be in sync with the item's mtime\n self.write_file_mtime(util.syspath(item.path), mtime)\n item.mtime = mtime\n\n def 
record_import_mtime(self, item, source, destination):\n \"\"\"Record the file mtime of an item's path before its import.\n \"\"\"\n mtime = os.stat(util.syspath(source)).st_mtime\n self.item_mtime[destination] = mtime\n self._log.debug(\"Recorded mtime {0} for item '{1}' imported from \"\n \"'{2}'\", mtime, util.displayable_path(destination),\n util.displayable_path(source))\n\n def update_album_times(self, lib, album):\n if self.reimported_album(album):\n self._log.debug(\"Album '{0}' is reimported, skipping import of \"\n \"added dates for the album and its items.\",\n util.displayable_path(album.path))\n return\n\n album_mtimes = []\n for item in album.items():\n mtime = self.item_mtime.pop(item.path, None)\n if mtime:\n album_mtimes.append(mtime)\n if self.config['preserve_mtimes'].get(bool):\n self.write_item_mtime(item, mtime)\n item.store()\n album.added = min(album_mtimes)\n self._log.debug(\"Import of album '{0}', selected album.added={1} \"\n \"from item file mtimes.\", album.album, album.added)\n album.store()\n\n def update_item_times(self, lib, item):\n if self.reimported_item(item):\n self._log.debug(\"Item '{0}' is reimported, skipping import of \"\n \"added date.\", util.displayable_path(item.path))\n return\n mtime = self.item_mtime.pop(item.path, None)\n if mtime:\n item.added = mtime\n if self.config['preserve_mtimes'].get(bool):\n self.write_item_mtime(item, mtime)\n self._log.debug(\"Import of item '{0}', selected item.added={1}\",\n util.displayable_path(item.path), item.added)\n item.store()\n\n def update_after_write_time(self, item, path):\n \"\"\"Update the mtime of the item's file with the item.added value\n after each write of the item if `preserve_write_mtimes` is enabled.\n \"\"\"\n if item.added:\n if self.config['preserve_write_mtimes'].get(bool):\n self.write_item_mtime(item, item.added)\n self._log.debug(\"Write of item '{0}', selected item.added={1}\",\n util.displayable_path(item.path), item.added)\n", "path": "beetsplug/importadded.py"}]} | 2,939 | 242 |
gh_patches_debug_2721 | rasdani/github-patches | git_diff | benoitc__gunicorn-1708 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gunicorn crashed on start with --reload flag
Setup: Vagrant, virtualenv, gunicorn 19.3.0:
The following command produces this stack:
`gunicorn -c /data/shared/api/gunicorn_config.py -b unix:/tmp/api-dev-gunicorn.sock --log-level INFO --reload wsgi:app`
```
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/vagrant/.pyenv/versions/2.7.6/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py", line 41, in run
for filename in self.get_files():
File "/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py", line 30, in get_files
if hasattr(module, '__file__')
File "/data/virtualenv/default/lib/python2.7/re.py", line 151, in sub
return _compile(pattern, flags).sub(repl, string, count)
TypeError: expected string or buffer
```
If I remove --reload, it boots up fine.
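
A minimal sketch of the failure mode (nothing project-specific assumed here): some entries in `sys.modules` can end up with `__file__` set to `None`, so the `hasattr` check passes but `re.sub` then receives `None` instead of a string. Guarding with `getattr` filters those entries out:

```python
import re
import sys

fnames = [
    re.sub('py[co]$', 'py', module.__file__)
    for module in list(sys.modules.values())
    if getattr(module, '__file__', None)  # skips both missing and None __file__
]
print(len(fnames), "module files found")
```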
</issue>
<code>
[start of gunicorn/reloader.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import os
7 import os.path
8 import re
9 import sys
10 import time
11 import threading
12
13
14 class Reloader(threading.Thread):
15 def __init__(self, extra_files=None, interval=1, callback=None):
16 super(Reloader, self).__init__()
17 self.setDaemon(True)
18 self._extra_files = set(extra_files or ())
19 self._extra_files_lock = threading.RLock()
20 self._interval = interval
21 self._callback = callback
22
23 def add_extra_file(self, filename):
24 with self._extra_files_lock:
25 self._extra_files.add(filename)
26
27 def get_files(self):
28 fnames = [
29 re.sub('py[co]$', 'py', module.__file__)
30 for module in list(sys.modules.values())
31 if hasattr(module, '__file__')
32 ]
33
34 with self._extra_files_lock:
35 fnames.extend(self._extra_files)
36
37 return fnames
38
39 def run(self):
40 mtimes = {}
41 while True:
42 for filename in self.get_files():
43 try:
44 mtime = os.stat(filename).st_mtime
45 except OSError:
46 continue
47 old_time = mtimes.get(filename)
48 if old_time is None:
49 mtimes[filename] = mtime
50 continue
51 elif mtime > old_time:
52 if self._callback:
53 self._callback(filename)
54 time.sleep(self._interval)
55
56 has_inotify = False
57 if sys.platform.startswith('linux'):
58 try:
59 from inotify.adapters import Inotify
60 import inotify.constants
61 has_inotify = True
62 except ImportError:
63 pass
64
65
66 if has_inotify:
67
68 class InotifyReloader(threading.Thread):
69 event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
70 | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
71 | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
72 | inotify.constants.IN_MOVED_TO)
73
74 def __init__(self, extra_files=None, callback=None):
75 super(InotifyReloader, self).__init__()
76 self.setDaemon(True)
77 self._callback = callback
78 self._dirs = set()
79 self._watcher = Inotify()
80
81 for extra_file in extra_files:
82 self.add_extra_file(extra_file)
83
84 def add_extra_file(self, filename):
85 dirname = os.path.dirname(filename)
86
87 if dirname in self._dirs:
88 return
89
90 self._watcher.add_watch(dirname, mask=self.event_mask)
91 self._dirs.add(dirname)
92
93 def get_dirs(self):
94 fnames = [
95 os.path.dirname(re.sub('py[co]$', 'py', module.__file__))
96 for module in list(sys.modules.values())
97 if hasattr(module, '__file__')
98 ]
99
100 return set(fnames)
101
102 def run(self):
103 self._dirs = self.get_dirs()
104
105 for dirname in self._dirs:
106 self._watcher.add_watch(dirname, mask=self.event_mask)
107
108 for event in self._watcher.event_gen():
109 if event is None:
110 continue
111
112 filename = event[3]
113
114 self._callback(filename)
115
116 else:
117
118 class InotifyReloader(object):
119 def __init__(self, callback=None):
120 raise ImportError('You must have the inotify module installed to '
121 'use the inotify reloader')
122
123
124 preferred_reloader = InotifyReloader if has_inotify else Reloader
125
126 reloader_engines = {
127 'auto': preferred_reloader,
128 'poll': Reloader,
129 'inotify': InotifyReloader,
130 }
131
[end of gunicorn/reloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py
--- a/gunicorn/reloader.py
+++ b/gunicorn/reloader.py
@@ -28,7 +28,7 @@
fnames = [
re.sub('py[co]$', 'py', module.__file__)
for module in list(sys.modules.values())
- if hasattr(module, '__file__')
+ if getattr(module, '__file__', None)
]
with self._extra_files_lock:
| {"golden_diff": "diff --git a/gunicorn/reloader.py b/gunicorn/reloader.py\n--- a/gunicorn/reloader.py\n+++ b/gunicorn/reloader.py\n@@ -28,7 +28,7 @@\n fnames = [\n re.sub('py[co]$', 'py', module.__file__)\n for module in list(sys.modules.values())\n- if hasattr(module, '__file__')\n+ if getattr(module, '__file__', None)\n ]\n \n with self._extra_files_lock:\n", "issue": "gunicorn crashed on start with --reload flag\nSetup: Vagrant, virtualenv, gunicorn 19.3.0:\n\nThe following command produces this stack:\n\n`gunicorn -c /data/shared/api/gunicorn_config.py -b unix:/tmp/api-dev-gunicorn.sock --log-level INFO --reload wsgi:app`\n\n```\nException in thread Thread-1:\nTraceback (most recent call last):\n File \"/home/vagrant/.pyenv/versions/2.7.6/lib/python2.7/threading.py\", line 810, in __bootstrap_inner\n self.run()\n File \"/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py\", line 41, in run\n for filename in self.get_files():\n File \"/data/virtualenv/default/lib/python2.7/site-packages/gunicorn/reloader.py\", line 30, in get_files\n if hasattr(module, '__file__')\n File \"/data/virtualenv/default/lib/python2.7/re.py\", line 151, in sub\n return _compile(pattern, flags).sub(repl, string, count)\nTypeError: expected string or buffer\n```\n\nIf I remove --reload it boots up fine.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport os.path\nimport re\nimport sys\nimport time\nimport threading\n\n\nclass Reloader(threading.Thread):\n def __init__(self, extra_files=None, interval=1, callback=None):\n super(Reloader, self).__init__()\n self.setDaemon(True)\n self._extra_files = set(extra_files or ())\n self._extra_files_lock = threading.RLock()\n self._interval = interval\n self._callback = callback\n\n def add_extra_file(self, filename):\n with self._extra_files_lock:\n self._extra_files.add(filename)\n\n def get_files(self):\n fnames = [\n re.sub('py[co]$', 'py', module.__file__)\n for module in list(sys.modules.values())\n if hasattr(module, '__file__')\n ]\n\n with self._extra_files_lock:\n fnames.extend(self._extra_files)\n\n return fnames\n\n def run(self):\n mtimes = {}\n while True:\n for filename in self.get_files():\n try:\n mtime = os.stat(filename).st_mtime\n except OSError:\n continue\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n continue\n elif mtime > old_time:\n if self._callback:\n self._callback(filename)\n time.sleep(self._interval)\n\nhas_inotify = False\nif sys.platform.startswith('linux'):\n try:\n from inotify.adapters import Inotify\n import inotify.constants\n has_inotify = True\n except ImportError:\n pass\n\n\nif has_inotify:\n\n class InotifyReloader(threading.Thread):\n event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE\n | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY\n | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM\n | inotify.constants.IN_MOVED_TO)\n\n def __init__(self, extra_files=None, callback=None):\n super(InotifyReloader, self).__init__()\n self.setDaemon(True)\n self._callback = callback\n self._dirs = set()\n self._watcher = Inotify()\n\n for extra_file in extra_files:\n self.add_extra_file(extra_file)\n\n def add_extra_file(self, filename):\n dirname = os.path.dirname(filename)\n\n if dirname in self._dirs:\n return\n\n self._watcher.add_watch(dirname, mask=self.event_mask)\n 
self._dirs.add(dirname)\n\n def get_dirs(self):\n fnames = [\n os.path.dirname(re.sub('py[co]$', 'py', module.__file__))\n for module in list(sys.modules.values())\n if hasattr(module, '__file__')\n ]\n\n return set(fnames)\n\n def run(self):\n self._dirs = self.get_dirs()\n\n for dirname in self._dirs:\n self._watcher.add_watch(dirname, mask=self.event_mask)\n\n for event in self._watcher.event_gen():\n if event is None:\n continue\n\n filename = event[3]\n\n self._callback(filename)\n\nelse:\n\n class InotifyReloader(object):\n def __init__(self, callback=None):\n raise ImportError('You must have the inotify module installed to '\n 'use the inotify reloader')\n\n\npreferred_reloader = InotifyReloader if has_inotify else Reloader\n\nreloader_engines = {\n 'auto': preferred_reloader,\n 'poll': Reloader,\n 'inotify': InotifyReloader,\n}\n", "path": "gunicorn/reloader.py"}]} | 1,888 | 106 |
gh_patches_debug_4526 | rasdani/github-patches | git_diff | Mailu__Mailu-929 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in migration file
Looking at logs I found a typo in one of the migration files. I don't really know if it is possible to change migration files manually so I'm not writing a pull request but a bug report ;)
https://github.com/Mailu/Mailu/blob/c484c5326b17877bc66a454970cf1342f956d47e/core/admin/migrations/versions/5aeb5811408e_.py#L109
This should probably read relay_table.
</issue>
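For orientation before reading the full migration below: the fix the issue asks for is simply renaming the mistyped `relay_tbale` reference back to the `relay_table` object defined near the top of the file. A sketch of the affected block after that one correction (no other changes assumed):

```python
# lower relays (corrected reference: relay_table, not relay_tbale)
for relay in connection.execute(relay_table.select()):
    connection.execute(relay_table.update().where(
        relay_table.c.name == relay.name
    ).values(
        name=relay.name.lower()
    ))
```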
<code>
[start of core/admin/migrations/versions/5aeb5811408e_.py]
1 """ Convert all domains and emails to lowercase
2
3 Revision ID: 5aeb5811408e
4 Revises: cd79ed46d9c2
5 Create Date: 2018-12-06 16:07:23.380579
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = '5aeb5811408e'
11 down_revision = 'f1393877871d'
12
13 from alembic import op, config
14 import sqlalchemy as sa
15
16
17 name_column = lambda: sa.Column('name', sa.String(80), primary_key=True)
18 domain_name_column = lambda: sa.Column('domain_name', sa.String(80))
19 user_email_column = lambda: sa.Column('user_email', sa.String(255))
20 email_columns = lambda: [
21 sa.Column('email', sa.String(255), primary_key=True),
22 sa.Column('localpart', sa.String(80)),
23 domain_name_column()
24 ]
25 id_columns = lambda: [
26 sa.Column('id', sa.Integer(), primary_key=True),
27 user_email_column()
28 ]
29
30
31 domain_table = sa.Table('domain', sa.MetaData(), name_column())
32 relay_table = sa.Table('relay', sa.MetaData(), name_column())
33 alternative_table = sa.Table('alternative', sa.MetaData(), name_column(), domain_name_column())
34 user_table = sa.Table('user', sa.MetaData(), *email_columns())
35 alias_table = sa.Table('alias', sa.MetaData(), *email_columns())
36 fetch_table = sa.Table('fetch', sa.MetaData(), *id_columns())
37 token_table = sa.Table('token', sa.MetaData(), *id_columns())
38 manager_table = sa.Table('manager', sa.MetaData(), domain_name_column(), user_email_column())
39
40
41 def upgrade():
42 connection = op.get_bind()
43
44 # drop foreign key constraints
45 with op.batch_alter_table('alias') as batch_op:
46 batch_op.drop_constraint('alias_domain_name_fkey', type_='foreignkey')
47 with op.batch_alter_table('alternative') as batch_op:
48 batch_op.drop_constraint('alternative_domain_name_fkey', type_='foreignkey')
49 with op.batch_alter_table('manager') as batch_op:
50 batch_op.drop_constraint('manager_domain_name_fkey', type_='foreignkey')
51 batch_op.drop_constraint('manager_user_email_fkey', type_='foreignkey')
52 with op.batch_alter_table('token') as batch_op:
53 batch_op.drop_constraint('token_user_email_fkey', type_='foreignkey')
54 with op.batch_alter_table('fetch') as batch_op:
55 batch_op.drop_constraint('fetch_user_email_fkey', type_='foreignkey')
56 with op.batch_alter_table('user') as batch_op:
57 batch_op.drop_constraint('user_domain_name_fkey', type_='foreignkey')
58
59 # lower domain names
60 for domain in connection.execute(domain_table.select()):
61 connection.execute(domain_table.update().where(
62 domain_table.c.name == domain.name
63 ).values(
64 name=domain.name.lower()
65 ))
66 # lower alternatives
67 for alternative in connection.execute(alternative_table.select()):
68 connection.execute(alternative_table.update().where(
69 alternative_table.c.name == alternative.name
70 ).values(
71 name=alternative.name.lower(),
72 domain_name=alternative.domain_name.lower()
73 ))
74 # lower users
75 for user in connection.execute(user_table.select()):
76 connection.execute(user_table.update().where(
77 user_table.c.email == user.email
78 ).values(
79 email=user.email.lower(),
80 localpart=user.localpart.lower(),
81 domain_name=user.domain_name.lower()
82 ))
83 # lower aliases
84 for alias in connection.execute(alias_table.select()):
85 connection.execute(alias_table.update().where(
86 alias_table.c.email == alias.email
87 ).values(
88 email=alias.email.lower(),
89 localpart=alias.localpart.lower(),
90 domain_name=alias.domain_name.lower()
91 ))
92 # lower fetches
93 for fetch in connection.execute(fetch_table.select()):
94 connection.execute(fetch_table.update().where(
95 fetch_table.c.id == fetch.id
96 ).values(
97 user_email=fetch.user_email.lower()
98 ))
99 # lower tokens
100 for token in connection.execute(token_table.select()):
101 connection.execute(token_table.update().where(
102 token_table.c.id == token.id
103 ).values(
104 user_email=token.user_email.lower()
105 ))
106 # lower relays
107 for relay in connection.execute(relay_table.select()):
108 connection.execute(relay_table.update().where(
109 relay_tbale.c.name == relay.name
110 ).values(
111 name=relay.name.lower()
112 ))
113 # lower managers
114 for manager in connection.execute(manager_table.select()):
115 connection.execute(manager_table.update().where(
116 sa.and_(
117 manager_table.c.domain_name == manager.domain_name,
118 manager_table.c.user_email == manager.user_email
119 )
120 ).values(
121 domain_name=manager.domain_name.lower(),
122 user_email=manager.user_email.lower()
123 ))
124
125 # restore foreign key constraints
126 with op.batch_alter_table('alias') as batch_op:
127 batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name'])
128 with op.batch_alter_table('user') as batch_op:
129 batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name'])
130 with op.batch_alter_table('alternative') as batch_op:
131 batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name'])
132 with op.batch_alter_table('manager') as batch_op:
133 batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name'])
134 batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email'])
135 with op.batch_alter_table('token') as batch_op:
136 batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email'])
137 with op.batch_alter_table('fetch') as batch_op:
138 batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email'])
139
140
141 def downgrade():
142 pass
143
[end of core/admin/migrations/versions/5aeb5811408e_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/migrations/versions/5aeb5811408e_.py b/core/admin/migrations/versions/5aeb5811408e_.py
--- a/core/admin/migrations/versions/5aeb5811408e_.py
+++ b/core/admin/migrations/versions/5aeb5811408e_.py
@@ -106,7 +106,7 @@
# lower relays
for relay in connection.execute(relay_table.select()):
connection.execute(relay_table.update().where(
- relay_tbale.c.name == relay.name
+ relay_table.c.name == relay.name
).values(
name=relay.name.lower()
))
| {"golden_diff": "diff --git a/core/admin/migrations/versions/5aeb5811408e_.py b/core/admin/migrations/versions/5aeb5811408e_.py\n--- a/core/admin/migrations/versions/5aeb5811408e_.py\n+++ b/core/admin/migrations/versions/5aeb5811408e_.py\n@@ -106,7 +106,7 @@\n # lower relays\n for relay in connection.execute(relay_table.select()):\n connection.execute(relay_table.update().where(\n- relay_tbale.c.name == relay.name\n+ relay_table.c.name == relay.name\n ).values(\n name=relay.name.lower()\n ))\n", "issue": "Typo in migration file\nLooking at logs I found a typo in one of the migration files. I don't really know if it is possible to change migration files manually so I'm not writing a pull request but a bug report ;)\r\n\r\nhttps://github.com/Mailu/Mailu/blob/c484c5326b17877bc66a454970cf1342f956d47e/core/admin/migrations/versions/5aeb5811408e_.py#L109\r\n\r\nThis should probably read relay_table.\nTypo in migration file\nLooking at logs I found a typo in one of the migration files. I don't really know if it is possible to change migration files manually so I'm not writing a pull request but a bug report ;)\r\n\r\nhttps://github.com/Mailu/Mailu/blob/c484c5326b17877bc66a454970cf1342f956d47e/core/admin/migrations/versions/5aeb5811408e_.py#L109\r\n\r\nThis should probably read relay_table.\n", "before_files": [{"content": "\"\"\" Convert all domains and emails to lowercase\n\nRevision ID: 5aeb5811408e\nRevises: cd79ed46d9c2\nCreate Date: 2018-12-06 16:07:23.380579\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5aeb5811408e'\ndown_revision = 'f1393877871d'\n\nfrom alembic import op, config\nimport sqlalchemy as sa\n\n\nname_column = lambda: sa.Column('name', sa.String(80), primary_key=True)\ndomain_name_column = lambda: sa.Column('domain_name', sa.String(80))\nuser_email_column = lambda: sa.Column('user_email', sa.String(255))\nemail_columns = lambda: [\n sa.Column('email', sa.String(255), primary_key=True),\n sa.Column('localpart', sa.String(80)),\n domain_name_column()\n]\nid_columns = lambda: [\n sa.Column('id', sa.Integer(), primary_key=True),\n user_email_column()\n]\n\n\ndomain_table = sa.Table('domain', sa.MetaData(), name_column())\nrelay_table = sa.Table('relay', sa.MetaData(), name_column())\nalternative_table = sa.Table('alternative', sa.MetaData(), name_column(), domain_name_column())\nuser_table = sa.Table('user', sa.MetaData(), *email_columns())\nalias_table = sa.Table('alias', sa.MetaData(), *email_columns())\nfetch_table = sa.Table('fetch', sa.MetaData(), *id_columns())\ntoken_table = sa.Table('token', sa.MetaData(), *id_columns())\nmanager_table = sa.Table('manager', sa.MetaData(), domain_name_column(), user_email_column())\n\n\ndef upgrade():\n connection = op.get_bind()\n\n # drop foreign key constraints\n with op.batch_alter_table('alias') as batch_op:\n batch_op.drop_constraint('alias_domain_name_fkey', type_='foreignkey')\n with op.batch_alter_table('alternative') as batch_op:\n batch_op.drop_constraint('alternative_domain_name_fkey', type_='foreignkey')\n with op.batch_alter_table('manager') as batch_op:\n batch_op.drop_constraint('manager_domain_name_fkey', type_='foreignkey')\n batch_op.drop_constraint('manager_user_email_fkey', type_='foreignkey')\n with op.batch_alter_table('token') as batch_op:\n batch_op.drop_constraint('token_user_email_fkey', type_='foreignkey')\n with op.batch_alter_table('fetch') as batch_op:\n batch_op.drop_constraint('fetch_user_email_fkey', type_='foreignkey')\n with op.batch_alter_table('user') as batch_op:\n 
batch_op.drop_constraint('user_domain_name_fkey', type_='foreignkey')\n\n # lower domain names\n for domain in connection.execute(domain_table.select()):\n connection.execute(domain_table.update().where(\n domain_table.c.name == domain.name\n ).values(\n name=domain.name.lower()\n ))\n # lower alternatives\n for alternative in connection.execute(alternative_table.select()):\n connection.execute(alternative_table.update().where(\n alternative_table.c.name == alternative.name\n ).values(\n name=alternative.name.lower(),\n domain_name=alternative.domain_name.lower()\n ))\n # lower users\n for user in connection.execute(user_table.select()):\n connection.execute(user_table.update().where(\n user_table.c.email == user.email\n ).values(\n email=user.email.lower(),\n localpart=user.localpart.lower(),\n domain_name=user.domain_name.lower()\n ))\n # lower aliases\n for alias in connection.execute(alias_table.select()):\n connection.execute(alias_table.update().where(\n alias_table.c.email == alias.email\n ).values(\n email=alias.email.lower(),\n localpart=alias.localpart.lower(),\n domain_name=alias.domain_name.lower()\n ))\n # lower fetches\n for fetch in connection.execute(fetch_table.select()):\n connection.execute(fetch_table.update().where(\n fetch_table.c.id == fetch.id\n ).values(\n user_email=fetch.user_email.lower()\n ))\n # lower tokens\n for token in connection.execute(token_table.select()):\n connection.execute(token_table.update().where(\n token_table.c.id == token.id\n ).values(\n user_email=token.user_email.lower()\n ))\n # lower relays\n for relay in connection.execute(relay_table.select()):\n connection.execute(relay_table.update().where(\n relay_tbale.c.name == relay.name\n ).values(\n name=relay.name.lower()\n ))\n # lower managers\n for manager in connection.execute(manager_table.select()):\n connection.execute(manager_table.update().where(\n sa.and_(\n manager_table.c.domain_name == manager.domain_name,\n manager_table.c.user_email == manager.user_email\n )\n ).values(\n domain_name=manager.domain_name.lower(),\n user_email=manager.user_email.lower()\n ))\n\n # restore foreign key constraints\n with op.batch_alter_table('alias') as batch_op:\n batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name'])\n with op.batch_alter_table('user') as batch_op:\n batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name'])\n with op.batch_alter_table('alternative') as batch_op:\n batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name'])\n with op.batch_alter_table('manager') as batch_op:\n batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name'])\n batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email'])\n with op.batch_alter_table('token') as batch_op:\n batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email'])\n with op.batch_alter_table('fetch') as batch_op:\n batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email'])\n\n\ndef downgrade():\n pass\n", "path": "core/admin/migrations/versions/5aeb5811408e_.py"}]} | 2,479 | 167 |
gh_patches_debug_36005 | rasdani/github-patches | git_diff | fossasia__open-event-server-4818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
We can create Sessions and Speakers even if it is disabled in the Event properties.
**Describe the bug**
We can create Sessions and Speakers even if the variable ```isSessionsSpeakersEnabled``` is set to false.
**To Reproduce**
Steps to reproduce the behavior:
1. Create an event with ```isSessionsSpeakersEnabled``` as false or update an existing one.
2. Now try to create a session or a speaker.
**Expected behavior**
Creating sessions or speakers should be rejected when ```isSessionsSpeakersEnabled``` is false for the event.
**Additional context**
(Two screenshots attached to the original issue are omitted here.)
</issue>
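The expected behaviour implies a server-side guard in the create endpoints, not just a UI restriction. A minimal sketch of such a check in the `before_post` hook of the session (and, analogously, speaker) list resource; it assumes the project's `get_count` helper and `ForbiddenException` are importable from `app.api.helpers`, and that the `Event` model exposes an `is_sessions_speakers_enabled` column:

```python
from app.api.helpers.db import get_count
from app.api.helpers.exceptions import ForbiddenException

def before_post(self, args, kwargs, data):
    require_relationship(['event'], data)
    # Reject the request outright if sessions/speakers are disabled for this event.
    if get_count(db.session.query(Event).filter_by(
            id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:
        raise ForbiddenException({'pointer': ''},
                                 "Sessions are disabled for this Event")
```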
<code>
[start of app/api/speakers.py]
1 from flask import request
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3 from flask_rest_jsonapi.exceptions import ObjectNotFound
4
5 from app.api.bootstrap import api
6 from app.api.helpers.db import safe_query
7 from app.api.helpers.permission_manager import has_access
8 from app.api.helpers.query import event_query
9 from app.api.helpers.utilities import require_relationship
10 from app.api.schema.speakers import SpeakerSchema
11 from app.models import db
12 from app.models.event import Event
13 from app.models.session import Session
14 from app.models.speaker import Speaker
15 from app.models.user import User
16
17
18 class SpeakerListPost(ResourceList):
19 """
20 List and create speakers
21 """
22
23 def before_post(self, args, kwargs, data):
24 """
25 method to add user_id to view_kwargs before post
26 :param args:
27 :param kwargs:
28 :param data:
29 :return:
30 """
31 require_relationship(['event', 'user'], data)
32
33 if not has_access('is_coorganizer', event_id=data['event']):
34 event = safe_query(self, Event, 'id', data['event'], 'event_id')
35 if event.state == "draft":
36 raise ObjectNotFound({'parameter': 'event_id'},
37 "Event: {} not found".format(data['event_id']))
38
39 if 'sessions' in data:
40 session_ids = data['sessions']
41 for session_id in session_ids:
42 if not has_access('is_session_self_submitted', session_id=session_id):
43 raise ObjectNotFound({'parameter': 'session_id'},
44 "Session: {} not found".format(session_id))
45
46 schema = SpeakerSchema
47 methods = ['POST', ]
48 data_layer = {'session': db.session,
49 'model': Speaker
50 }
51
52
53 class SpeakerList(ResourceList):
54 """
55 List speakers based on different params from view_kwargs
56 """
57
58 def query(self, view_kwargs):
59 """
60 query method for speakers list class
61 :param view_kwargs:
62 :return:
63 """
64 query_ = self.session.query(Speaker)
65 query_ = event_query(self, query_, view_kwargs)
66
67 if view_kwargs.get('user_id'):
68 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
69 query_ = query_.join(User).filter(User.id == user.id)
70
71 if view_kwargs.get('session_id'):
72 session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')
73 # session-speaker :: many-to-many relationship
74 query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))
75 if 'Authorization' in request.headers and not has_access('is_coorganizer', event_id=session.event_id):
76 if not has_access('is_session_self_submitted', session_id=session.id):
77 query_ = query_.filter(Session.state == "approved" or Session.state == "accepted")
78
79 return query_
80
81 view_kwargs = True
82 schema = SpeakerSchema
83 methods = ['GET', ]
84 data_layer = {'session': db.session,
85 'model': Speaker,
86 'methods': {
87 'query': query,
88 }}
89
90
91 class SpeakerDetail(ResourceDetail):
92 """
93 Speakers Detail by id
94 """
95 decorators = (api.has_permission('is_coorganizer_or_user_itself', methods="PATCH,DELETE", fetch="event_id",
96 fetch_as="event_id", model=Speaker),)
97 schema = SpeakerSchema
98 data_layer = {'session': db.session,
99 'model': Speaker}
100
101
102 class SpeakerRelationshipRequired(ResourceRelationship):
103 """
104 Speaker Relationship class for required entities
105 """
106 decorators = (api.has_permission('is_coorganizer_or_user_itself', methods="PATCH,DELETE", fetch="event_id",
107 fetch_as="event_id", model=Speaker),)
108 methods = ['GET', 'PATCH']
109 schema = SpeakerSchema
110 data_layer = {'session': db.session,
111 'model': Speaker}
112
113
114 class SpeakerRelationshipOptional(ResourceRelationship):
115 """
116 Speaker Relationship class
117 """
118 decorators = (api.has_permission('is_coorganizer_or_user_itself', methods="PATCH,DELETE", fetch="event_id",
119 fetch_as="event_id", model=Speaker),)
120 schema = SpeakerSchema
121 data_layer = {'session': db.session,
122 'model': Speaker}
123
[end of app/api/speakers.py]
[start of app/api/sessions.py]
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2
3 from app.api.bootstrap import api
4 from app.api.events import Event
5 from app.api.helpers.db import safe_query
6 from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject
7 from app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject
8 from app.api.helpers.permissions import current_identity
9 from app.api.helpers.query import event_query
10 from app.api.helpers.utilities import require_relationship
11 from app.api.schema.sessions import SessionSchema
12 from app.models import db
13 from app.models.microlocation import Microlocation
14 from app.models.session import Session
15 from app.models.session_type import SessionType
16 from app.models.speaker import Speaker
17 from app.models.track import Track
18 from app.models.user import User
19 from app.settings import get_settings
20
21
22 class SessionListPost(ResourceList):
23 """
24 List Sessions
25 """
26 def before_post(self, args, kwargs, data):
27 """
28 before post method to check for required relationship and proper permission
29 :param args:
30 :param kwargs:
31 :param data:
32 :return:
33 """
34 require_relationship(['event'], data)
35 data['creator_id'] = current_identity.id
36
37 def after_create_object(self, session, data, view_kwargs):
38 """
39 method to send email for creation of new session
40 mails session link to the concerned user
41 :param session:
42 :param data:
43 :param view_kwargs:
44 :return:
45 """
46 if session.event.get_organizer():
47 event_name = session.event.name
48 organizer = session.event.get_organizer()
49 organizer_email = organizer.email
50 frontend_url = get_settings()['frontend_url']
51 link = "{}/events/{}/sessions/{}"\
52 .format(frontend_url, session.event_id, session.id)
53 send_email_new_session(organizer_email, event_name, link)
54 send_notif_new_session_organizer(organizer, event_name, link)
55
56 decorators = (api.has_permission('create_event'),)
57 schema = SessionSchema
58 data_layer = {'session': db.session,
59 'model': Session,
60 'methods': {'after_create_object': after_create_object
61 }}
62
63
64 class SessionList(ResourceList):
65 """
66 List Sessions
67 """
68
69 def query(self, view_kwargs):
70 """
71 query method for SessionList class
72 :param view_kwargs:
73 :return:
74 """
75 query_ = self.session.query(Session)
76 if view_kwargs.get('track_id') is not None:
77 track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')
78 query_ = query_.join(Track).filter(Track.id == track.id)
79 if view_kwargs.get('session_type_id') is not None:
80 session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')
81 query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)
82 if view_kwargs.get('microlocation_id') is not None:
83 microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')
84 query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)
85 if view_kwargs.get('user_id') is not None:
86 user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')
87 query_ = query_.join(User).filter(User.id == user.id)
88 query_ = event_query(self, query_, view_kwargs)
89 if view_kwargs.get('speaker_id'):
90 speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')
91 # session-speaker :: many-to-many relationship
92 query_ = Session.query.filter(Session.speakers.any(id=speaker.id))
93
94 return query_
95
96 view_kwargs = True
97 methods = ['GET']
98 schema = SessionSchema
99 data_layer = {'session': db.session,
100 'model': Session,
101 'methods': {
102 'query': query
103 }}
104
105
106 class SessionDetail(ResourceDetail):
107 """
108 Session detail by id
109 """
110 def before_get_object(self, view_kwargs):
111 """
112 before get method to get the resource id for fetching details
113 :param view_kwargs:
114 :return:
115 """
116 if view_kwargs.get('event_identifier'):
117 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')
118 view_kwargs['event_id'] = event.id
119
120 def after_update_object(self, session, data, view_kwargs):
121 """ Send email if session accepted or rejected """
122 if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):
123 # Email for speaker
124 speakers = session.speakers
125 for speaker in speakers:
126 frontend_url = get_settings()['frontend_url']
127 link = "{}/events/{}/sessions/{}" \
128 .format(frontend_url, session.event_id, session.id)
129 send_email_session_accept_reject(speaker.email, session, link)
130 send_notif_session_accept_reject(speaker, session.title, session.state, link)
131
132 # Email for organizer
133 if session.event.get_organizer():
134 organizer = session.event.get_organizer()
135 organizer_email = organizer.email
136 frontend_url = get_settings()['frontend_url']
137 link = "{}/events/{}/sessions/{}" \
138 .format(frontend_url, session.event_id, session.id)
139 send_email_session_accept_reject(organizer_email, session,
140 link)
141 send_notif_session_accept_reject(organizer, session.title,
142 session.state, link)
143
144 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
145 schema = SessionSchema
146 data_layer = {'session': db.session,
147 'model': Session,
148 'methods': {'before_get_object': before_get_object,
149 'after_update_object': after_update_object}}
150
151
152 class SessionRelationshipRequired(ResourceRelationship):
153 """
154 Session Relationship
155 """
156 schema = SessionSchema
157 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
158 methods = ['GET', 'PATCH']
159 data_layer = {'session': db.session,
160 'model': Session}
161
162
163 class SessionRelationshipOptional(ResourceRelationship):
164 """
165 Session Relationship
166 """
167 schema = SessionSchema
168 decorators = (api.has_permission('is_speaker_for_session', methods="PATCH,DELETE"),)
169 data_layer = {'session': db.session,
170 'model': Session}
171
[end of app/api/sessions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/sessions.py b/app/api/sessions.py
--- a/app/api/sessions.py
+++ b/app/api/sessions.py
@@ -2,7 +2,8 @@
from app.api.bootstrap import api
from app.api.events import Event
-from app.api.helpers.db import safe_query
+from app.api.helpers.db import safe_query, get_count
+from app.api.helpers.exceptions import ForbiddenException
from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject
from app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject
from app.api.helpers.permissions import current_identity
@@ -33,6 +34,8 @@
"""
require_relationship(['event'], data)
data['creator_id'] = current_identity.id
+ if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:
+ raise ForbiddenException({'pointer': ''}, "Sessions are disabled for this Event")
def after_create_object(self, session, data, view_kwargs):
"""
diff --git a/app/api/speakers.py b/app/api/speakers.py
--- a/app/api/speakers.py
+++ b/app/api/speakers.py
@@ -3,7 +3,8 @@
from flask_rest_jsonapi.exceptions import ObjectNotFound
from app.api.bootstrap import api
-from app.api.helpers.db import safe_query
+from app.api.helpers.db import safe_query, get_count
+from app.api.helpers.exceptions import ForbiddenException
from app.api.helpers.permission_manager import has_access
from app.api.helpers.query import event_query
from app.api.helpers.utilities import require_relationship
@@ -36,6 +37,9 @@
raise ObjectNotFound({'parameter': 'event_id'},
"Event: {} not found".format(data['event_id']))
+ if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:
+ raise ForbiddenException({'pointer': ''}, "Speakers are disabled for this Event")
+
if 'sessions' in data:
session_ids = data['sessions']
for session_id in session_ids:
| {"golden_diff": "diff --git a/app/api/sessions.py b/app/api/sessions.py\n--- a/app/api/sessions.py\n+++ b/app/api/sessions.py\n@@ -2,7 +2,8 @@\n \n from app.api.bootstrap import api\n from app.api.events import Event\n-from app.api.helpers.db import safe_query\n+from app.api.helpers.db import safe_query, get_count\n+from app.api.helpers.exceptions import ForbiddenException\n from app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\n from app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject\n from app.api.helpers.permissions import current_identity\n@@ -33,6 +34,8 @@\n \"\"\"\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n+ if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:\n+ raise ForbiddenException({'pointer': ''}, \"Sessions are disabled for this Event\")\n \n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\ndiff --git a/app/api/speakers.py b/app/api/speakers.py\n--- a/app/api/speakers.py\n+++ b/app/api/speakers.py\n@@ -3,7 +3,8 @@\n from flask_rest_jsonapi.exceptions import ObjectNotFound\n \n from app.api.bootstrap import api\n-from app.api.helpers.db import safe_query\n+from app.api.helpers.db import safe_query, get_count\n+from app.api.helpers.exceptions import ForbiddenException\n from app.api.helpers.permission_manager import has_access\n from app.api.helpers.query import event_query\n from app.api.helpers.utilities import require_relationship\n@@ -36,6 +37,9 @@\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n \n+ if get_count(db.session.query(Event).filter_by(id=int(data['event']), is_sessions_speakers_enabled=False)) > 0:\n+ raise ForbiddenException({'pointer': ''}, \"Speakers are disabled for this Event\")\n+\n if 'sessions' in data:\n session_ids = data['sessions']\n for session_id in session_ids:\n", "issue": "We can create Sessions and Speakers even if it is disabled in the Event properties.\n**Describe the bug**\r\nWe can create Sessions and Speakers even if the variable ```isSessionsSpeakersEnabled``` is set to false.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create an event with ```isSessionsSpeakersEnabled``` as false or update an existing one.\r\n2. Now try to create a session or a speaker.\r\n\r\n**Expected behavior**\r\nIt doesn't make sense to create sessions when it is disabled in the event.\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\n\r\n\r\n\n", "before_files": [{"content": "from flask import request\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.speakers import SpeakerSchema\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.user import User\n\n\nclass SpeakerListPost(ResourceList):\n \"\"\"\n List and create speakers\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to add user_id to view_kwargs before post\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event', 'user'], data)\n\n if not has_access('is_coorganizer', event_id=data['event']):\n event = safe_query(self, Event, 'id', data['event'], 'event_id')\n if event.state == \"draft\":\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n if 'sessions' in data:\n session_ids = data['sessions']\n for session_id in session_ids:\n if not has_access('is_session_self_submitted', session_id=session_id):\n raise ObjectNotFound({'parameter': 'session_id'},\n \"Session: {} not found\".format(session_id))\n\n schema = SpeakerSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Speaker\n }\n\n\nclass SpeakerList(ResourceList):\n \"\"\"\n List speakers based on different params from view_kwargs\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for speakers list class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Speaker)\n query_ = event_query(self, query_, view_kwargs)\n\n if view_kwargs.get('user_id'):\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 'user_id')\n query_ = query_.join(User).filter(User.id == user.id)\n\n if view_kwargs.get('session_id'):\n session = safe_query(self, Session, 'id', view_kwargs['session_id'], 'session_id')\n # session-speaker :: many-to-many relationship\n query_ = Speaker.query.filter(Speaker.sessions.any(id=session.id))\n if 'Authorization' in request.headers and not has_access('is_coorganizer', event_id=session.event_id):\n if not has_access('is_session_self_submitted', session_id=session.id):\n query_ = query_.filter(Session.state == \"approved\" or Session.state == \"accepted\")\n\n return query_\n\n view_kwargs = True\n schema = SpeakerSchema\n methods = ['GET', ]\n data_layer = {'session': db.session,\n 'model': Speaker,\n 'methods': {\n 'query': query,\n }}\n\n\nclass SpeakerDetail(ResourceDetail):\n \"\"\"\n Speakers Detail by id\n \"\"\"\n decorators = (api.has_permission('is_coorganizer_or_user_itself', methods=\"PATCH,DELETE\", fetch=\"event_id\",\n fetch_as=\"event_id\", model=Speaker),)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n\n\nclass SpeakerRelationshipRequired(ResourceRelationship):\n \"\"\"\n Speaker Relationship class for required entities\n \"\"\"\n decorators = (api.has_permission('is_coorganizer_or_user_itself', methods=\"PATCH,DELETE\", fetch=\"event_id\",\n fetch_as=\"event_id\", model=Speaker),)\n methods = ['GET', 'PATCH']\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': 
Speaker}\n\n\nclass SpeakerRelationshipOptional(ResourceRelationship):\n \"\"\"\n Speaker Relationship class\n \"\"\"\n decorators = (api.has_permission('is_coorganizer_or_user_itself', methods=\"PATCH,DELETE\", fetch=\"event_id\",\n fetch_as=\"event_id\", model=Speaker),)\n schema = SpeakerSchema\n data_layer = {'session': db.session,\n 'model': Speaker}\n", "path": "app/api/speakers.py"}, {"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n\nfrom app.api.bootstrap import api\nfrom app.api.events import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.mail import send_email_new_session, send_email_session_accept_reject\nfrom app.api.helpers.notification import send_notif_new_session_organizer, send_notif_session_accept_reject\nfrom app.api.helpers.permissions import current_identity\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.sessions import SessionSchema\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.session import Session\nfrom app.models.session_type import SessionType\nfrom app.models.speaker import Speaker\nfrom app.models.track import Track\nfrom app.models.user import User\nfrom app.settings import get_settings\n\n\nclass SessionListPost(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n data['creator_id'] = current_identity.id\n\n def after_create_object(self, session, data, view_kwargs):\n \"\"\"\n method to send email for creation of new session\n mails session link to the concerned user\n :param session:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if session.event.get_organizer():\n event_name = session.event.name\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\"\\\n .format(frontend_url, session.event_id, session.id)\n send_email_new_session(organizer_email, event_name, link)\n send_notif_new_session_organizer(organizer, event_name, link)\n\n decorators = (api.has_permission('create_event'),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'after_create_object': after_create_object\n }}\n\n\nclass SessionList(ResourceList):\n \"\"\"\n List Sessions\n \"\"\"\n\n def query(self, view_kwargs):\n \"\"\"\n query method for SessionList class\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(Session)\n if view_kwargs.get('track_id') is not None:\n track = safe_query(self, Track, 'id', view_kwargs['track_id'], 'track_id')\n query_ = query_.join(Track).filter(Track.id == track.id)\n if view_kwargs.get('session_type_id') is not None:\n session_type = safe_query(self, SessionType, 'id', view_kwargs['session_type_id'], 'session_type_id')\n query_ = query_.join(SessionType).filter(SessionType.id == session_type.id)\n if view_kwargs.get('microlocation_id') is not None:\n microlocation = safe_query(self, Microlocation, 'id', view_kwargs['microlocation_id'], 'microlocation_id')\n query_ = query_.join(Microlocation).filter(Microlocation.id == microlocation.id)\n if view_kwargs.get('user_id') is not None:\n user = safe_query(self, User, 'id', view_kwargs['user_id'], 
'user_id')\n query_ = query_.join(User).filter(User.id == user.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('speaker_id'):\n speaker = safe_query(self, Speaker, 'id', view_kwargs['speaker_id'], 'speaker_id')\n # session-speaker :: many-to-many relationship\n query_ = Session.query.filter(Session.speakers.any(id=speaker.id))\n\n return query_\n\n view_kwargs = True\n methods = ['GET']\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {\n 'query': query\n }}\n\n\nclass SessionDetail(ResourceDetail):\n \"\"\"\n Session detail by id\n \"\"\"\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'identifier')\n view_kwargs['event_id'] = event.id\n\n def after_update_object(self, session, data, view_kwargs):\n \"\"\" Send email if session accepted or rejected \"\"\"\n if 'state' in data and (session.state == 'accepted' or session.state == 'rejected'):\n # Email for speaker\n speakers = session.speakers\n for speaker in speakers:\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(speaker.email, session, link)\n send_notif_session_accept_reject(speaker, session.title, session.state, link)\n\n # Email for organizer\n if session.event.get_organizer():\n organizer = session.event.get_organizer()\n organizer_email = organizer.email\n frontend_url = get_settings()['frontend_url']\n link = \"{}/events/{}/sessions/{}\" \\\n .format(frontend_url, session.event_id, session.id)\n send_email_session_accept_reject(organizer_email, session,\n link)\n send_notif_session_accept_reject(organizer, session.title,\n session.state, link)\n\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n schema = SessionSchema\n data_layer = {'session': db.session,\n 'model': Session,\n 'methods': {'before_get_object': before_get_object,\n 'after_update_object': after_update_object}}\n\n\nclass SessionRelationshipRequired(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n methods = ['GET', 'PATCH']\n data_layer = {'session': db.session,\n 'model': Session}\n\n\nclass SessionRelationshipOptional(ResourceRelationship):\n \"\"\"\n Session Relationship\n \"\"\"\n schema = SessionSchema\n decorators = (api.has_permission('is_speaker_for_session', methods=\"PATCH,DELETE\"),)\n data_layer = {'session': db.session,\n 'model': Session}\n", "path": "app/api/sessions.py"}]} | 3,803 | 472 |
gh_patches_debug_31473 | rasdani/github-patches | git_diff | sopel-irc__sopel-1498 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
adminchannel: Restore op/voice commands
@kwaaak made a very good point the other day in PM on IRC: `adminchannel`'s commands for managing ops and voices were removed (902e715) for seemingly no particular reason.
I haven't decided when to do it (for 6.x or wait for Sopel 7), but I do think those commands should come back. They'll need a bit of tweaking, but nothing major. Milestone 6.6.0 for now, I guess, to be punted if I can think of a reason it should be delayed.
</issue>
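For context, restoring these commands mostly means re-adding thin wrappers around `MODE +o/-o/+v/-v`, written in the same style as the surviving commands in the module below. A rough sketch of one of them (`.op`); the `deop`, `voice`, and `devoice` variants would differ only in the mode flag and, for voicing, in requiring only `HALFOP` from the bot:

```python
@require_chanmsg
@require_privilege(OP, 'You are not a channel operator.')
@commands('op')
def op(bot, trigger):
    """Op the given nick, or the caller if no nick is given."""
    if bot.channels[trigger.sender].privileges[bot.nick] < OP:
        return bot.reply("I'm not a channel operator!")
    nick = trigger.group(2) or trigger.nick
    bot.write(['MODE', trigger.sender, '+o', nick])
```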
<code>
[start of sopel/modules/adminchannel.py]
1 # coding=utf-8
2 """
3 adminchannel.py - Sopel Channel Admin Module
4 Copyright 2010-2011, Michael Yanovich, Alek Rollyson, and Elsie Powell
5 Copyright © 2012, Elad Alfassa <[email protected]>
6 Licensed under the Eiffel Forum License 2.
7
8 https://sopel.chat
9 """
10 from __future__ import unicode_literals, absolute_import, print_function, division
11
12 import re
13
14 from sopel import formatting
15 from sopel.module import (
16 commands, example, priority, OP, HALFOP, require_privilege, require_chanmsg
17 )
18 from sopel.tools import Identifier
19
20
21 def default_mask(trigger):
22 welcome = formatting.color('Welcome to:', formatting.colors.PURPLE)
23 chan = formatting.color(trigger.sender, formatting.colors.TEAL)
24 topic_ = formatting.bold('Topic:')
25 topic_ = formatting.color('| ' + topic_, formatting.colors.PURPLE)
26 arg = formatting.color('{}', formatting.colors.GREEN)
27 return '{} {} {} {}'.format(welcome, chan, topic_, arg)
28
29
30 @require_chanmsg
31 @require_privilege(OP, 'You are not a channel operator.')
32 @commands('kick')
33 @priority('high')
34 def kick(bot, trigger):
35 """Kick a user from the channel."""
36 if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
37 return bot.reply("I'm not a channel operator!")
38 text = trigger.group().split()
39 argc = len(text)
40 if argc < 2:
41 return
42 opt = Identifier(text[1])
43 nick = opt
44 channel = trigger.sender
45 reasonidx = 2
46 if not opt.is_nick():
47 if argc < 3:
48 return
49 nick = text[2]
50 channel = opt
51 reasonidx = 3
52 reason = ' '.join(text[reasonidx:])
53 if nick != bot.config.core.nick:
54 bot.write(['KICK', channel, nick], reason)
55
56
57 def configureHostMask(mask):
58 if mask == '*!*@*':
59 return mask
60 if re.match('^[^.@!/]+$', mask) is not None:
61 return '%s!*@*' % mask
62 if re.match('^[^@!]+$', mask) is not None:
63 return '*!*@%s' % mask
64
65 m = re.match('^([^!@]+)@$', mask)
66 if m is not None:
67 return '*!%s@*' % m.group(1)
68
69 m = re.match('^([^!@]+)@([^@!]+)$', mask)
70 if m is not None:
71 return '*!%s@%s' % (m.group(1), m.group(2))
72
73 m = re.match('^([^!@]+)!(^[!@]+)@?$', mask)
74 if m is not None:
75 return '%s!%s@*' % (m.group(1), m.group(2))
76 return ''
77
78
79 @require_chanmsg
80 @require_privilege(OP, 'You are not a channel operator.')
81 @commands('ban')
82 @priority('high')
83 def ban(bot, trigger):
84 """Ban a user from the channel
85
86 The bot must be a channel operator for this command to work.
87 """
88 if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
89 return bot.reply("I'm not a channel operator!")
90 text = trigger.group().split()
91 argc = len(text)
92 if argc < 2:
93 return
94 opt = Identifier(text[1])
95 banmask = opt
96 channel = trigger.sender
97 if not opt.is_nick():
98 if argc < 3:
99 return
100 channel = opt
101 banmask = text[2]
102 banmask = configureHostMask(banmask)
103 if banmask == '':
104 return
105 bot.write(['MODE', channel, '+b', banmask])
106
107
108 @require_chanmsg
109 @require_privilege(OP, 'You are not a channel operator.')
110 @commands('unban')
111 def unban(bot, trigger):
112 """Unban a user from the channel
113
114 The bot must be a channel operator for this command to work.
115 """
116 if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
117 return bot.reply("I'm not a channel operator!")
118 text = trigger.group().split()
119 argc = len(text)
120 if argc < 2:
121 return
122 opt = Identifier(text[1])
123 banmask = opt
124 channel = trigger.sender
125 if not opt.is_nick():
126 if argc < 3:
127 return
128 channel = opt
129 banmask = text[2]
130 banmask = configureHostMask(banmask)
131 if banmask == '':
132 return
133 bot.write(['MODE', channel, '-b', banmask])
134
135
136 @require_chanmsg
137 @require_privilege(OP, 'You are not a channel operator.')
138 @commands('quiet')
139 def quiet(bot, trigger):
140 """Quiet a user
141
142 The bot must be a channel operator for this command to work.
143 """
144 if bot.channels[trigger.sender].privileges[bot.nick] < OP:
145 return bot.reply("I'm not a channel operator!")
146 text = trigger.group().split()
147 argc = len(text)
148 if argc < 2:
149 return
150 opt = Identifier(text[1])
151 quietmask = opt
152 channel = trigger.sender
153 if not opt.is_nick():
154 if argc < 3:
155 return
156 quietmask = text[2]
157 channel = opt
158 quietmask = configureHostMask(quietmask)
159 if quietmask == '':
160 return
161 bot.write(['MODE', channel, '+q', quietmask])
162
163
164 @require_chanmsg
165 @require_privilege(OP, 'You are not a channel operator.')
166 @commands('unquiet')
167 def unquiet(bot, trigger):
168 """Unquiet a user
169
170 The bot must be a channel operator for this command to work.
171 """
172 if bot.channels[trigger.sender].privileges[bot.nick] < OP:
173 return bot.reply("I'm not a channel operator!")
174 text = trigger.group().split()
175 argc = len(text)
176 if argc < 2:
177 return
178 opt = Identifier(text[1])
179 quietmask = opt
180 channel = trigger.sender
181 if not opt.is_nick():
182 if argc < 3:
183 return
184 quietmask = text[2]
185 channel = opt
186 quietmask = configureHostMask(quietmask)
187 if quietmask == '':
188 return
189 bot.write(['MODE', channel, '-q', quietmask])
190
191
192 @require_chanmsg
193 @require_privilege(OP, 'You are not a channel operator.')
194 @commands('kickban', 'kb')
195 @example('.kickban [#chan] user1 user!*@* get out of here')
196 @priority('high')
197 def kickban(bot, trigger):
198 """Kick and ban a user from the channel
199
200 The bot must be a channel operator for this command to work.
201 """
202 if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
203 return bot.reply("I'm not a channel operator!")
204 text = trigger.group().split()
205 argc = len(text)
206 if argc < 4:
207 return
208 opt = Identifier(text[1])
209 nick = opt
210 mask = text[2]
211 channel = trigger.sender
212 reasonidx = 3
213 if not opt.is_nick():
214 if argc < 5:
215 return
216 channel = opt
217 nick = text[2]
218 mask = text[3]
219 reasonidx = 4
220 reason = ' '.join(text[reasonidx:])
221 mask = configureHostMask(mask)
222 if mask == '':
223 return
224 bot.write(['MODE', channel, '+b', mask])
225 bot.write(['KICK', channel, nick], reason)
226
227
228 @require_chanmsg
229 @require_privilege(OP, 'You are not a channel operator.')
230 @commands('topic')
231 def topic(bot, trigger):
232 """Change the channel topic
233
234 The bot must be a channel operator for this command to work.
235 """
236 if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
237 return bot.reply("I'm not a channel operator!")
238 if not trigger.group(2):
239 return
240 channel = trigger.sender.lower()
241
242 narg = 1
243 mask = None
244 mask = bot.db.get_channel_value(channel, 'topic_mask')
245 mask = mask or default_mask(trigger)
246 mask = mask.replace('%s', '{}')
247 narg = len(re.findall('{}', mask))
248
249 top = trigger.group(2)
250 args = []
251 if top:
252 args = top.split('~', narg)
253
254 if len(args) != narg:
255 message = "Not enough arguments. You gave {}, it requires {}.".format(
256 len(args), narg)
257 return bot.say(message)
258 topic = mask.format(*args)
259
260 bot.write(('TOPIC', channel + ' :' + topic))
261
262
263 @require_chanmsg
264 @require_privilege(OP, 'You are not a channel operator.')
265 @commands('tmask')
266 def set_mask(bot, trigger):
267 """Set the topic mask to use for the current channel
268
269 Within the topic mask, {} is used to allow substituting in chunks of text.
270
271 This mask is used when running the 'topic' command.
272 """
273 bot.db.set_channel_value(trigger.sender, 'topic_mask', trigger.group(2))
274 bot.say("Gotcha, " + trigger.nick)
275
276
277 @require_chanmsg
278 @require_privilege(OP, 'You are not a channel operator.')
279 @commands('showmask')
280 def show_mask(bot, trigger):
281 """Show the topic mask for the current channel."""
282 mask = bot.db.get_channel_value(trigger.sender, 'topic_mask')
283 mask = mask or default_mask(trigger)
284 bot.say(mask)
285
[end of sopel/modules/adminchannel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/adminchannel.py b/sopel/modules/adminchannel.py
--- a/sopel/modules/adminchannel.py
+++ b/sopel/modules/adminchannel.py
@@ -19,6 +19,74 @@
return '{} {} {} {}'.format(welcome, chan, topic_, arg)
+@require_chanmsg
+@require_privilege(OP, 'You are not a channel operator.')
+@commands('op')
+def op(bot, trigger):
+ """
+ Command to op users in a room. If no nick is given,
+ Sopel will op the nick who sent the command
+ """
+ if bot.channels[trigger.sender].privileges[bot.nick] < OP:
+ return bot.reply("I'm not a channel operator!")
+ nick = trigger.group(2)
+ channel = trigger.sender
+ if not nick:
+ nick = trigger.nick
+ bot.write(['MODE', channel, "+o", nick])
+
+
+@require_chanmsg
+@require_privilege(OP, 'You are not a channel operator.')
+@commands('deop')
+def deop(bot, trigger):
+ """
+ Command to deop users in a room. If no nick is given,
+ Sopel will deop the nick who sent the command
+ """
+ if bot.channels[trigger.sender].privileges[bot.nick] < OP:
+ return bot.reply("I'm not a channel operator!")
+ nick = trigger.group(2)
+ channel = trigger.sender
+ if not nick:
+ nick = trigger.nick
+ bot.write(['MODE', channel, "-o", nick])
+
+
+@require_chanmsg
+@require_privilege(OP, 'You are not a channel operator.')
+@commands('voice')
+def voice(bot, trigger):
+ """
+ Command to voice users in a room. If no nick is given,
+ Sopel will voice the nick who sent the command
+ """
+ if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
+ return bot.reply("I'm not a channel operator!")
+ nick = trigger.group(2)
+ channel = trigger.sender
+ if not nick:
+ nick = trigger.nick
+ bot.write(['MODE', channel, "+v", nick])
+
+
+@require_chanmsg
+@require_privilege(OP, 'You are not a channel operator.')
+@commands('devoice')
+def devoice(bot, trigger):
+ """
+ Command to devoice users in a room. If no nick is given,
+ Sopel will devoice the nick who sent the command
+ """
+ if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:
+ return bot.reply("I'm not a channel operator!")
+ nick = trigger.group(2)
+ channel = trigger.sender
+ if not nick:
+ nick = trigger.nick
+ bot.write(['MODE', channel, "-v", nick])
+
+
@require_chanmsg
@require_privilege(OP, 'You are not a channel operator.')
@commands('kick')
| {"golden_diff": "diff --git a/sopel/modules/adminchannel.py b/sopel/modules/adminchannel.py\n--- a/sopel/modules/adminchannel.py\n+++ b/sopel/modules/adminchannel.py\n@@ -19,6 +19,74 @@\n return '{} {} {} {}'.format(welcome, chan, topic_, arg)\n \n \n+@require_chanmsg\n+@require_privilege(OP, 'You are not a channel operator.')\n+@commands('op')\n+def op(bot, trigger):\n+ \"\"\"\n+ Command to op users in a room. If no nick is given,\n+ Sopel will op the nick who sent the command\n+ \"\"\"\n+ if bot.channels[trigger.sender].privileges[bot.nick] < OP:\n+ return bot.reply(\"I'm not a channel operator!\")\n+ nick = trigger.group(2)\n+ channel = trigger.sender\n+ if not nick:\n+ nick = trigger.nick\n+ bot.write(['MODE', channel, \"+o\", nick])\n+\n+\n+@require_chanmsg\n+@require_privilege(OP, 'You are not a channel operator.')\n+@commands('deop')\n+def deop(bot, trigger):\n+ \"\"\"\n+ Command to deop users in a room. If no nick is given,\n+ Sopel will deop the nick who sent the command\n+ \"\"\"\n+ if bot.channels[trigger.sender].privileges[bot.nick] < OP:\n+ return bot.reply(\"I'm not a channel operator!\")\n+ nick = trigger.group(2)\n+ channel = trigger.sender\n+ if not nick:\n+ nick = trigger.nick\n+ bot.write(['MODE', channel, \"-o\", nick])\n+\n+\n+@require_chanmsg\n+@require_privilege(OP, 'You are not a channel operator.')\n+@commands('voice')\n+def voice(bot, trigger):\n+ \"\"\"\n+ Command to voice users in a room. If no nick is given,\n+ Sopel will voice the nick who sent the command\n+ \"\"\"\n+ if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n+ return bot.reply(\"I'm not a channel operator!\")\n+ nick = trigger.group(2)\n+ channel = trigger.sender\n+ if not nick:\n+ nick = trigger.nick\n+ bot.write(['MODE', channel, \"+v\", nick])\n+\n+\n+@require_chanmsg\n+@require_privilege(OP, 'You are not a channel operator.')\n+@commands('devoice')\n+def devoice(bot, trigger):\n+ \"\"\"\n+ Command to devoice users in a room. If no nick is given,\n+ Sopel will devoice the nick who sent the command\n+ \"\"\"\n+ if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n+ return bot.reply(\"I'm not a channel operator!\")\n+ nick = trigger.group(2)\n+ channel = trigger.sender\n+ if not nick:\n+ nick = trigger.nick\n+ bot.write(['MODE', channel, \"-v\", nick])\n+\n+\n @require_chanmsg\n @require_privilege(OP, 'You are not a channel operator.')\n @commands('kick')\n", "issue": "adminchannel: Restore op/voice commands\n@kwaaak made a very good point the other day in PM on IRC: `adminchannel`'s commands for managing ops and voices were removed (902e715) for seemingly no particular reason.\r\n\r\nI haven't decided when to do it (for 6.x or wait for Sopel 7), but I do think those commands should come back. They'll need a bit of tweaking, but nothing major. 
Milestone 6.6.0 for now, I guess, to be punted if I can think of a reason it should be delayed.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nadminchannel.py - Sopel Channel Admin Module\nCopyright 2010-2011, Michael Yanovich, Alek Rollyson, and Elsie Powell\nCopyright \u00a9 2012, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nfrom sopel import formatting\nfrom sopel.module import (\n commands, example, priority, OP, HALFOP, require_privilege, require_chanmsg\n)\nfrom sopel.tools import Identifier\n\n\ndef default_mask(trigger):\n welcome = formatting.color('Welcome to:', formatting.colors.PURPLE)\n chan = formatting.color(trigger.sender, formatting.colors.TEAL)\n topic_ = formatting.bold('Topic:')\n topic_ = formatting.color('| ' + topic_, formatting.colors.PURPLE)\n arg = formatting.color('{}', formatting.colors.GREEN)\n return '{} {} {} {}'.format(welcome, chan, topic_, arg)\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('kick')\n@priority('high')\ndef kick(bot, trigger):\n \"\"\"Kick a user from the channel.\"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 2:\n return\n opt = Identifier(text[1])\n nick = opt\n channel = trigger.sender\n reasonidx = 2\n if not opt.is_nick():\n if argc < 3:\n return\n nick = text[2]\n channel = opt\n reasonidx = 3\n reason = ' '.join(text[reasonidx:])\n if nick != bot.config.core.nick:\n bot.write(['KICK', channel, nick], reason)\n\n\ndef configureHostMask(mask):\n if mask == '*!*@*':\n return mask\n if re.match('^[^.@!/]+$', mask) is not None:\n return '%s!*@*' % mask\n if re.match('^[^@!]+$', mask) is not None:\n return '*!*@%s' % mask\n\n m = re.match('^([^!@]+)@$', mask)\n if m is not None:\n return '*!%s@*' % m.group(1)\n\n m = re.match('^([^!@]+)@([^@!]+)$', mask)\n if m is not None:\n return '*!%s@%s' % (m.group(1), m.group(2))\n\n m = re.match('^([^!@]+)!(^[!@]+)@?$', mask)\n if m is not None:\n return '%s!%s@*' % (m.group(1), m.group(2))\n return ''\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('ban')\n@priority('high')\ndef ban(bot, trigger):\n \"\"\"Ban a user from the channel\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 2:\n return\n opt = Identifier(text[1])\n banmask = opt\n channel = trigger.sender\n if not opt.is_nick():\n if argc < 3:\n return\n channel = opt\n banmask = text[2]\n banmask = configureHostMask(banmask)\n if banmask == '':\n return\n bot.write(['MODE', channel, '+b', banmask])\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('unban')\ndef unban(bot, trigger):\n \"\"\"Unban a user from the channel\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 2:\n return\n opt = Identifier(text[1])\n banmask = opt\n channel = trigger.sender\n if not opt.is_nick():\n if argc < 3:\n return\n 
channel = opt\n banmask = text[2]\n banmask = configureHostMask(banmask)\n if banmask == '':\n return\n bot.write(['MODE', channel, '-b', banmask])\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('quiet')\ndef quiet(bot, trigger):\n \"\"\"Quiet a user\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < OP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 2:\n return\n opt = Identifier(text[1])\n quietmask = opt\n channel = trigger.sender\n if not opt.is_nick():\n if argc < 3:\n return\n quietmask = text[2]\n channel = opt\n quietmask = configureHostMask(quietmask)\n if quietmask == '':\n return\n bot.write(['MODE', channel, '+q', quietmask])\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('unquiet')\ndef unquiet(bot, trigger):\n \"\"\"Unquiet a user\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < OP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 2:\n return\n opt = Identifier(text[1])\n quietmask = opt\n channel = trigger.sender\n if not opt.is_nick():\n if argc < 3:\n return\n quietmask = text[2]\n channel = opt\n quietmask = configureHostMask(quietmask)\n if quietmask == '':\n return\n bot.write(['MODE', channel, '-q', quietmask])\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('kickban', 'kb')\n@example('.kickban [#chan] user1 user!*@* get out of here')\n@priority('high')\ndef kickban(bot, trigger):\n \"\"\"Kick and ban a user from the channel\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n return bot.reply(\"I'm not a channel operator!\")\n text = trigger.group().split()\n argc = len(text)\n if argc < 4:\n return\n opt = Identifier(text[1])\n nick = opt\n mask = text[2]\n channel = trigger.sender\n reasonidx = 3\n if not opt.is_nick():\n if argc < 5:\n return\n channel = opt\n nick = text[2]\n mask = text[3]\n reasonidx = 4\n reason = ' '.join(text[reasonidx:])\n mask = configureHostMask(mask)\n if mask == '':\n return\n bot.write(['MODE', channel, '+b', mask])\n bot.write(['KICK', channel, nick], reason)\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('topic')\ndef topic(bot, trigger):\n \"\"\"Change the channel topic\n\n The bot must be a channel operator for this command to work.\n \"\"\"\n if bot.channels[trigger.sender].privileges[bot.nick] < HALFOP:\n return bot.reply(\"I'm not a channel operator!\")\n if not trigger.group(2):\n return\n channel = trigger.sender.lower()\n\n narg = 1\n mask = None\n mask = bot.db.get_channel_value(channel, 'topic_mask')\n mask = mask or default_mask(trigger)\n mask = mask.replace('%s', '{}')\n narg = len(re.findall('{}', mask))\n\n top = trigger.group(2)\n args = []\n if top:\n args = top.split('~', narg)\n\n if len(args) != narg:\n message = \"Not enough arguments. 
You gave {}, it requires {}.\".format(\n len(args), narg)\n return bot.say(message)\n topic = mask.format(*args)\n\n bot.write(('TOPIC', channel + ' :' + topic))\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('tmask')\ndef set_mask(bot, trigger):\n \"\"\"Set the topic mask to use for the current channel\n\n Within the topic mask, {} is used to allow substituting in chunks of text.\n\n This mask is used when running the 'topic' command.\n \"\"\"\n bot.db.set_channel_value(trigger.sender, 'topic_mask', trigger.group(2))\n bot.say(\"Gotcha, \" + trigger.nick)\n\n\n@require_chanmsg\n@require_privilege(OP, 'You are not a channel operator.')\n@commands('showmask')\ndef show_mask(bot, trigger):\n \"\"\"Show the topic mask for the current channel.\"\"\"\n mask = bot.db.get_channel_value(trigger.sender, 'topic_mask')\n mask = mask or default_mask(trigger)\n bot.say(mask)\n", "path": "sopel/modules/adminchannel.py"}]} | 3,558 | 688 |
gh_patches_debug_21397 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 5024: order of filter
**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list
**user:** any
**expected behaviour:**
**behaviour:**
**important screensize:**
**device & browser:**
**Comment/Question:** the order of the filters was 1) category 2) archive. It should stay this way, but since we will touch the filters again later we could also fix it then; we just need to remember it. So if it is easy, maybe change it now?
Screenshot?
<img width="490" alt="Bildschirmfoto 2021-12-21 um 16 25 15" src="https://user-images.githubusercontent.com/35491681/146955180-11799600-c739-4d17-8f84-7581b57a861b.png">
</issue>
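For orientation, a minimal sketch of what the reporter is asking for, assuming the filter dict assembled in `ProposalFilterInfoMixin.list()` (shown in the listing below) is what drives the order in the frontend: Python dicts preserve insertion order, so adding the category entry before the archived entry is enough to restore the expected 1) category, 2) archive ordering.

```python
# Hedged sketch, not the project's actual patch: a stand-alone illustration
# of the insertion-order idea with the translation/category machinery
# stripped out.
def build_filters(category_choices):
    filters = {}
    filters['ordering'] = {'label': 'Ordering', 'default': '-created'}
    # category first ...
    filters['category'] = {'label': 'Category', 'choices': category_choices}
    # ... then the archive toggle
    filters['is_archived'] = {
        'label': 'Archived',
        'choices': [('', 'All'), ('false', 'No'), ('true', 'Yes')],
        'default': 'false',
    }
    return filters
```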
<code>
[start of meinberlin/apps/budgeting/api.py]
1 from django.utils.translation import get_language
2 from django.utils.translation import gettext_lazy as _
3 from django_filters.rest_framework import DjangoFilterBackend
4 from rest_framework import mixins
5 from rest_framework import viewsets
6 from rest_framework.filters import OrderingFilter
7 from rest_framework.pagination import PageNumberPagination
8
9 from adhocracy4.api.mixins import ModuleMixin
10 from adhocracy4.api.permissions import ViewSetRulesPermission
11 from adhocracy4.categories import get_category_icon_url
12 from adhocracy4.categories import has_icons
13 from adhocracy4.categories.models import Category
14 from meinberlin.apps.contrib.filters import IdeaCategoryFilterBackend
15 from meinberlin.apps.votes.api import VotingTokenInfoMixin
16
17 from .models import Proposal
18 from .serializers import ProposalSerializer
19
20
21 # To be changed to a more general IdeaPagination, when using
22 # pagination via rest api for more idea lists
23 class ProposalPagination(PageNumberPagination):
24 page_size = 15
25
26 def get_paginated_response(self, data):
27 response = super(ProposalPagination, self).get_paginated_response(data)
28 response.data['page_size'] = self.page_size
29 response.data['page_count'] = self.page.paginator.num_pages
30 return response
31
32
33 class LocaleInfoMixin:
34 def list(self, request, *args, **kwargs):
35 response = super().list(request, args, kwargs)
36 response.data['locale'] = get_language()
37 return response
38
39
40 class ProposalFilterInfoMixin(ModuleMixin):
41 def list(self, request, *args, **kwargs):
42 """Add the filter information to the data of the Proposal API.
43
44 Needs to be used with rest_framework.mixins.ListModelMixin
45 """
46 filters = {}
47
48 ordering_choices = [('-created', _('Most recent')), ]
49 if self.module.has_feature('rate', Proposal):
50 ordering_choices += ('-positive_rating_count', _('Most popular')),
51 ordering_choices += ('-comment_count', _('Most commented')),
52
53 filters['ordering'] = {
54 'label': _('Ordering'),
55 'choices': ordering_choices,
56 'default': '-created',
57 }
58
59 filters['is_archived'] = {
60 'label': _('Archived'),
61 'choices': [
62 ('', _('All')),
63 ('false', _('No')),
64 ('true', _('Yes')),
65 ],
66 'default': 'false',
67 }
68
69 categories = Category.objects.filter(
70 module=self.module
71 )
72 if categories:
73 category_choices = [('', _('All')), ]
74 if has_icons(self.module):
75 category_icons = []
76 for category in categories:
77 category_choices += (str(category.pk), category.name),
78 if has_icons(self.module):
79 icon_name = getattr(category, 'icon', None)
80 icon_url = get_category_icon_url(icon_name)
81 category_icons += (str(category.pk), icon_url),
82
83 filters['category'] = {
84 'label': _('Category'),
85 'choices': category_choices,
86 }
87 if has_icons(self.module):
88 filters['category']['icons'] = category_icons
89
90 response = super().list(request, args, kwargs)
91 response.data['filters'] = filters
92 return response
93
94
95 class ProposalViewSet(ProposalFilterInfoMixin,
96 LocaleInfoMixin,
97 VotingTokenInfoMixin,
98 mixins.ListModelMixin,
99 viewsets.GenericViewSet,
100 ):
101
102 pagination_class = ProposalPagination
103 serializer_class = ProposalSerializer
104 permission_classes = (ViewSetRulesPermission,)
105 filter_backends = (DjangoFilterBackend,
106 OrderingFilter,
107 IdeaCategoryFilterBackend,)
108 filter_fields = ('is_archived', 'category',)
109 ordering_fields = ('created',
110 'comment_count',
111 'positive_rating_count',)
112
113 def get_permission_object(self):
114 return self.module
115
116 def get_queryset(self):
117 proposals = Proposal.objects\
118 .filter(module=self.module) \
119 .annotate_comment_count() \
120 .annotate_positive_rating_count() \
121 .annotate_negative_rating_count() \
122 .order_by('-created')
123 return proposals
124
[end of meinberlin/apps/budgeting/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/budgeting/api.py b/meinberlin/apps/budgeting/api.py
--- a/meinberlin/apps/budgeting/api.py
+++ b/meinberlin/apps/budgeting/api.py
@@ -56,16 +56,6 @@
'default': '-created',
}
- filters['is_archived'] = {
- 'label': _('Archived'),
- 'choices': [
- ('', _('All')),
- ('false', _('No')),
- ('true', _('Yes')),
- ],
- 'default': 'false',
- }
-
categories = Category.objects.filter(
module=self.module
)
@@ -87,6 +77,16 @@
if has_icons(self.module):
filters['category']['icons'] = category_icons
+ filters['is_archived'] = {
+ 'label': _('Archived'),
+ 'choices': [
+ ('', _('All')),
+ ('false', _('No')),
+ ('true', _('Yes')),
+ ],
+ 'default': 'false',
+ }
+
response = super().list(request, args, kwargs)
response.data['filters'] = filters
return response
| {"golden_diff": "diff --git a/meinberlin/apps/budgeting/api.py b/meinberlin/apps/budgeting/api.py\n--- a/meinberlin/apps/budgeting/api.py\n+++ b/meinberlin/apps/budgeting/api.py\n@@ -56,16 +56,6 @@\n 'default': '-created',\n }\n \n- filters['is_archived'] = {\n- 'label': _('Archived'),\n- 'choices': [\n- ('', _('All')),\n- ('false', _('No')),\n- ('true', _('Yes')),\n- ],\n- 'default': 'false',\n- }\n-\n categories = Category.objects.filter(\n module=self.module\n )\n@@ -87,6 +77,16 @@\n if has_icons(self.module):\n filters['category']['icons'] = category_icons\n \n+ filters['is_archived'] = {\n+ 'label': _('Archived'),\n+ 'choices': [\n+ ('', _('All')),\n+ ('false', _('No')),\n+ ('true', _('Yes')),\n+ ],\n+ 'default': 'false',\n+ }\n+\n response = super().list(request, args, kwargs)\n response.data['filters'] = filters\n return response\n", "issue": "testing 5024: order of filter \n**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list\r\n**user:** any\r\n**expected behaviour:** \r\n**behaviour:** \r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** the order of the filter was 1) category 2) archive. It should stay this way but as we will touch the filter again later we can also do it then but need to remember. So if easy maybe change now?\r\n\r\nScreenshot?\r\n<img width=\"490\" alt=\"Bildschirmfoto 2021-12-21 um 16 25 15\" src=\"https://user-images.githubusercontent.com/35491681/146955180-11799600-c739-4d17-8f84-7581b57a861b.png\">\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import get_language\nfrom django.utils.translation import gettext_lazy as _\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import mixins\nfrom rest_framework import viewsets\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom adhocracy4.api.mixins import ModuleMixin\nfrom adhocracy4.api.permissions import ViewSetRulesPermission\nfrom adhocracy4.categories import get_category_icon_url\nfrom adhocracy4.categories import has_icons\nfrom adhocracy4.categories.models import Category\nfrom meinberlin.apps.contrib.filters import IdeaCategoryFilterBackend\nfrom meinberlin.apps.votes.api import VotingTokenInfoMixin\n\nfrom .models import Proposal\nfrom .serializers import ProposalSerializer\n\n\n# To be changed to a more general IdeaPagination, when using\n# pagination via rest api for more idea lists\nclass ProposalPagination(PageNumberPagination):\n page_size = 15\n\n def get_paginated_response(self, data):\n response = super(ProposalPagination, self).get_paginated_response(data)\n response.data['page_size'] = self.page_size\n response.data['page_count'] = self.page.paginator.num_pages\n return response\n\n\nclass LocaleInfoMixin:\n def list(self, request, *args, **kwargs):\n response = super().list(request, args, kwargs)\n response.data['locale'] = get_language()\n return response\n\n\nclass ProposalFilterInfoMixin(ModuleMixin):\n def list(self, request, *args, **kwargs):\n \"\"\"Add the filter information to the data of the Proposal API.\n\n Needs to be used with rest_framework.mixins.ListModelMixin\n \"\"\"\n filters = {}\n\n ordering_choices = [('-created', _('Most recent')), ]\n if self.module.has_feature('rate', Proposal):\n ordering_choices += ('-positive_rating_count', _('Most popular')),\n ordering_choices += ('-comment_count', _('Most commented')),\n\n filters['ordering'] = {\n 'label': _('Ordering'),\n 'choices': 
ordering_choices,\n 'default': '-created',\n }\n\n filters['is_archived'] = {\n 'label': _('Archived'),\n 'choices': [\n ('', _('All')),\n ('false', _('No')),\n ('true', _('Yes')),\n ],\n 'default': 'false',\n }\n\n categories = Category.objects.filter(\n module=self.module\n )\n if categories:\n category_choices = [('', _('All')), ]\n if has_icons(self.module):\n category_icons = []\n for category in categories:\n category_choices += (str(category.pk), category.name),\n if has_icons(self.module):\n icon_name = getattr(category, 'icon', None)\n icon_url = get_category_icon_url(icon_name)\n category_icons += (str(category.pk), icon_url),\n\n filters['category'] = {\n 'label': _('Category'),\n 'choices': category_choices,\n }\n if has_icons(self.module):\n filters['category']['icons'] = category_icons\n\n response = super().list(request, args, kwargs)\n response.data['filters'] = filters\n return response\n\n\nclass ProposalViewSet(ProposalFilterInfoMixin,\n LocaleInfoMixin,\n VotingTokenInfoMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n ):\n\n pagination_class = ProposalPagination\n serializer_class = ProposalSerializer\n permission_classes = (ViewSetRulesPermission,)\n filter_backends = (DjangoFilterBackend,\n OrderingFilter,\n IdeaCategoryFilterBackend,)\n filter_fields = ('is_archived', 'category',)\n ordering_fields = ('created',\n 'comment_count',\n 'positive_rating_count',)\n\n def get_permission_object(self):\n return self.module\n\n def get_queryset(self):\n proposals = Proposal.objects\\\n .filter(module=self.module) \\\n .annotate_comment_count() \\\n .annotate_positive_rating_count() \\\n .annotate_negative_rating_count() \\\n .order_by('-created')\n return proposals\n", "path": "meinberlin/apps/budgeting/api.py"}]} | 1,876 | 271 |
gh_patches_debug_6641 | rasdani/github-patches | git_diff | ivy-llc__ivy-14502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
generalized_normal
</issue>
<code>
[start of ivy/functional/frontends/jax/random.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.jax.func_wrapper import (
5 to_ivy_arrays_and_back,
6 handle_jax_dtype,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def PRNGKey(seed):
12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)
13
14
15 @handle_jax_dtype
16 @to_ivy_arrays_and_back
17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):
18 return ivy.random_uniform(
19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])
20 )
21
22
23 @handle_jax_dtype
24 @to_ivy_arrays_and_back
25 def normal(key, shape=(), dtype=None):
26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))
27
28
29 def _get_seed(key):
30 key1, key2 = int(key[0]), int(key[1])
31 return ivy.to_scalar(int("".join(map(str, [key1, key2]))))
32
33
34 @handle_jax_dtype
35 @to_ivy_arrays_and_back
36 @with_unsupported_dtypes(
37 {
38 "0.3.14 and below": (
39 "float16",
40 "bfloat16",
41 )
42 },
43 "jax",
44 )
45 def beta(key, a, b, shape=None, dtype=None):
46 seed = _get_seed(key)
47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
48
49
50 @handle_jax_dtype
51 @to_ivy_arrays_and_back
52 @with_unsupported_dtypes(
53 {
54 "0.3.14 and below": (
55 "float16",
56 "bfloat16",
57 )
58 },
59 "jax",
60 )
61 def dirichlet(key, alpha, shape=None, dtype="float32"):
62 seed = _get_seed(key)
63 alpha = ivy.astype(alpha, dtype)
64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
65
66
67 @handle_jax_dtype
68 @to_ivy_arrays_and_back
69 @with_unsupported_dtypes(
70 {"0.3.14 and below": ("unsigned", "int8", "int16")},
71 "jax",
72 )
73 def poisson(key, lam, shape=None, dtype=None):
74 seed = _get_seed(key)
75 return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)
76
77
78 @handle_jax_dtype
79 @to_ivy_arrays_and_back
80 @with_unsupported_dtypes(
81 {
82 "0.3.14 and below": (
83 "float16",
84 "bfloat16",
85 )
86 },
87 "jax",
88 )
89 def gamma(key, a, shape=None, dtype="float64"):
90 seed = _get_seed(key)
91 return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)
92
93
94 @handle_jax_dtype
95 @to_ivy_arrays_and_back
96 @with_unsupported_dtypes(
97 {
98 "0.3.14 and below": (
99 "float16",
100 "bfloat16",
101 )
102 },
103 "jax",
104 )
105 def gumbel(key, shape=(), dtype="float64"):
106 seed = _get_seed(key)
107 uniform_x = ivy.random_uniform(
108 low=0.0,
109 high=1.0,
110 shape=shape,
111 dtype=dtype,
112 seed=seed,
113 )
114 return -ivy.log(-ivy.log(uniform_x))
115
116
117 @handle_jax_dtype
118 @to_ivy_arrays_and_back
119 @with_unsupported_dtypes(
120 {
121 "0.3.14 and below": (
122 "float16",
123 "bfloat16",
124 )
125 },
126 "jax",
127 )
128 def t(key, df, shape=(), dtype="float64"):
129 seed = _get_seed(key)
130 n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)
131 half_df = df / 2.0
132 g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)
133 return n * ivy.sqrt(ivy.divide(half_df, g))
134
[end of ivy/functional/frontends/jax/random.py]
</code>
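One common construction of the generalized (exponential power) normal distribution draws G ~ Gamma(1/p, 1) and an independent random sign r, and returns r * G**(1/p). Below is a sketch in the style of the frontend functions above; the use of `ivy.bernoulli` for the sign and the exact signature are assumptions rather than confirmed API.

```python
# Hedged sketch only: assumes the module context above (_get_seed, the
# handle_jax_dtype / to_ivy_arrays_and_back decorators) and that ivy
# exposes gamma and bernoulli samplers with these signatures.
@handle_jax_dtype
@to_ivy_arrays_and_back
def generalized_normal(key, p, shape=(), dtype="float64"):
    seed = _get_seed(key)
    g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)
    b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)
    sign = 2 * b - 1  # maps {0, 1} draws to {-1, +1}
    return sign * g ** (1 / p)
```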
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py
--- a/ivy/functional/frontends/jax/random.py
+++ b/ivy/functional/frontends/jax/random.py
@@ -125,6 +125,14 @@
},
"jax",
)
+def generalized_normal(key, p, shape=(), dtype="float64"):
+ seed = _get_seed(key)
+ g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)
+ b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)
+ r = 2 * b - 1
+ return r * g ** (1 / p)
+
+
def t(key, df, shape=(), dtype="float64"):
seed = _get_seed(key)
n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -125,6 +125,14 @@\n },\n \"jax\",\n )\n+def generalized_normal(key, p, shape=(), dtype=\"float64\"):\n+ seed = _get_seed(key)\n+ g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)\n+ b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)\n+ r = 2 * b - 1\n+ return r * g ** (1 / p)\n+\n+\n def t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n", "issue": "generalized_normal\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gamma(key, a, shape=None, dtype=\"float64\"):\n seed = _get_seed(key)\n return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gumbel(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n uniform_x = ivy.random_uniform(\n low=0.0,\n high=1.0,\n shape=shape,\n dtype=dtype,\n seed=seed,\n )\n return -ivy.log(-ivy.log(uniform_x))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n half_df = df / 2.0\n g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)\n return n * 
ivy.sqrt(ivy.divide(half_df, g))\n", "path": "ivy/functional/frontends/jax/random.py"}]} | 1,825 | 227 |
gh_patches_debug_8804 | rasdani/github-patches | git_diff | kivy__kivy-5579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CTRL-C in a textinput crashes the app
<!--
The issue tracker is a tool to address bugs.
Please use the #kivy IRC channel on freenode or Stack Overflow for
support questions, more information at https://git.io/vM1yQ.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://git.io/vM1iE
* prepare a short, runnable example that reproduces the issue
* reproduce the problem with the latest development version of Kivy
* double-check that the issue is indeed a bug and not a support request
-->
### Versions
* Python: 3.6.3
* OS: Windows 10
* Kivy: 1.10.0
* Kivy installation method: using pip, followed the steps shown in the guide
### Description
Trying to use CTRL-C to copy text from a TextInput crashes the program.
### Code and Logs
```python
import kivy
from kivy.uix.textinput import TextInput
from kivy.app import App
class program(App):
def build(self):
return TextInput()
prog = program()
prog.run()
Traceback (most recent call last):
File "E:/pycharm/MethasiaChatSurelyUnstable/vladegay.py", line 12, in <module>
prog.run()
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\app.py", line 828, in run
runTouchApp()
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\base.py", line 504, in runTouchApp
EventLoop.window.mainloop()
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\window\window_sdl2.py", line 663, in mainloop
self._mainloop()
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\window\window_sdl2.py", line 602, in _mainloop
self.modifiers):
File "kivy\_event.pyx", line 714, in kivy._event.EventDispatcher.dispatch (kivy\_event.c:8146)
File "kivy\_event.pyx", line 1225, in kivy._event.EventObservers.dispatch (kivy\_event.c:14035)
File "kivy\_event.pyx", line 1149, in kivy._event.EventObservers._dispatch (kivy\_event.c:13564)
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\window\__init__.py", line 159, in _on_window_key_down
return self.dispatch('on_key_down', keycode, text, modifiers)
File "kivy\_event.pyx", line 714, in kivy._event.EventDispatcher.dispatch (kivy\_event.c:8146)
File "kivy\_event.pyx", line 1225, in kivy._event.EventObservers.dispatch (kivy\_event.c:14035)
File "kivy\_event.pyx", line 1149, in kivy._event.EventObservers._dispatch (kivy\_event.c:13564)
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\uix\textinput.py", line 2404, in keyboard_on_key_down
self.copy()
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\uix\textinput.py", line 1712, in copy
return Clipboard.copy(self.selection_text)
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\clipboard\__init__.py", line 73, in copy
self._copy(data)
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\clipboard\__init__.py", line 87, in _copy
self.put(data, self._clip_mime_type)
File "C:\Users\hellNo\AppData\Local\Programs\Python\Python36-32\lib\site-packages\kivy\core\clipboard\clipboard_winctypes.py", line 55, in put
msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
ValueError: embedded null character
```
</issue>
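The ValueError comes from handing ctypes a Python string that still carries the trailing null appended in `put()`: CPython refuses to build a `c_wchar_p` from a string with an embedded null character. A sketch of one way around it, keeping the terminator in the allocated buffer size but stripping it from the source string (an illustration, not necessarily the upstream fix):

```python
# Inside ClipboardWindows.put(), after text += u'\x00':
hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
# strSource no longer contains an embedded null, so c_wchar_p accepts it;
# the destination size still accounts for the terminator.
msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text[:-1]))
SetClipboardData(CF_UNICODETEXT, hCd)
```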
<code>
[start of kivy/core/clipboard/clipboard_winctypes.py]
1 '''
2 Clipboard windows: an implementation of the Clipboard using ctypes.
3 '''
4
5 __all__ = ('ClipboardWindows', )
6
7 from kivy.utils import platform
8 from kivy.core.clipboard import ClipboardBase
9
10 if platform != 'win':
11 raise SystemError('unsupported platform for Windows clipboard')
12
13 import ctypes
14 from ctypes import wintypes
15 user32 = ctypes.windll.user32
16 kernel32 = ctypes.windll.kernel32
17 msvcrt = ctypes.cdll.msvcrt
18 c_char_p = ctypes.c_char_p
19 c_wchar_p = ctypes.c_wchar_p
20
21
22 class ClipboardWindows(ClipboardBase):
23
24 def get(self, mimetype='text/plain'):
25 GetClipboardData = user32.GetClipboardData
26 GetClipboardData.argtypes = [wintypes.UINT]
27 GetClipboardData.restype = wintypes.HANDLE
28
29 user32.OpenClipboard(user32.GetActiveWindow())
30 # Standard Clipboard Format "1" is "CF_TEXT"
31 pcontents = GetClipboardData(13)
32
33 # if someone pastes a FILE, the content is None for SCF 13
34 # and the clipboard is locked if not closed properly
35 if not pcontents:
36 user32.CloseClipboard()
37 return ''
38 data = c_wchar_p(pcontents).value.encode(self._encoding)
39 user32.CloseClipboard()
40 return data
41
42 def put(self, text, mimetype='text/plain'):
43 text = text.decode(self._encoding) # auto converted later
44 text += u'\x00'
45
46 SetClipboardData = user32.SetClipboardData
47 SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]
48 SetClipboardData.restype = wintypes.HANDLE
49
50 GlobalAlloc = kernel32.GlobalAlloc
51 GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]
52 GlobalAlloc.restype = wintypes.HGLOBAL
53
54 CF_UNICODETEXT = 13
55
56 user32.OpenClipboard(user32.GetActiveWindow())
57 user32.EmptyClipboard()
58 hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
59 msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
60 SetClipboardData(CF_UNICODETEXT, hCd)
61 user32.CloseClipboard()
62
63 def get_types(self):
64 return ['text/plain']
65
[end of kivy/core/clipboard/clipboard_winctypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py
--- a/kivy/core/clipboard/clipboard_winctypes.py
+++ b/kivy/core/clipboard/clipboard_winctypes.py
@@ -56,7 +56,9 @@
user32.OpenClipboard(user32.GetActiveWindow())
user32.EmptyClipboard()
hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))
- msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))
+
+ # ignore null character for strSource pointer
+ msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text[:-1]))
SetClipboardData(CF_UNICODETEXT, hCd)
user32.CloseClipboard()
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_winctypes.py b/kivy/core/clipboard/clipboard_winctypes.py\n--- a/kivy/core/clipboard/clipboard_winctypes.py\n+++ b/kivy/core/clipboard/clipboard_winctypes.py\n@@ -56,7 +56,9 @@\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n- msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n+\n+ # ignore null character for strSource pointer\n+ msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text[:-1]))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n", "issue": "CTRL-C in a textinput crashes the app\n<!--\r\nThe issue tracker is a tool to address bugs.\r\nPlease use the #kivy IRC channel on freenode or Stack Overflow for\r\nsupport questions, more information at https://git.io/vM1yQ.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://git.io/vM1iE\r\n * prepare a short, runnable example that reproduces the issue\r\n * reproduce the problem with the latest development version of Kivy\r\n * double-check that the issue is indeed a bug and not a support request\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.6.3\r\n* OS: Windows 10\r\n* Kivy: 1.10.0\r\n* Kivy installation method: using pip, followed the steps shown in the guide\r\n\r\n### Description\r\n\r\nTrying to use CTRL-C to copy text from a textinput, and the program crashes.\r\n\r\n### Code and Logs\r\n\r\n```python\r\nimport kivy\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.app import App\r\n\r\nclass program(App):\r\n def build(self):\r\n return TextInput()\r\n\r\nprog = program()\r\nprog.run()\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"E:/pycharm/MethasiaChatSurelyUnstable/vladegay.py\", line 12, in <module>\r\n prog.run()\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\app.py\", line 828, in run\r\n runTouchApp()\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\base.py\", line 504, in runTouchApp\r\n EventLoop.window.mainloop()\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\window\\window_sdl2.py\", line 663, in mainloop\r\n self._mainloop()\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\window\\window_sdl2.py\", line 602, in _mainloop\r\n self.modifiers):\r\n File \"kivy\\_event.pyx\", line 714, in kivy._event.EventDispatcher.dispatch (kivy\\_event.c:8146)\r\n File \"kivy\\_event.pyx\", line 1225, in kivy._event.EventObservers.dispatch (kivy\\_event.c:14035)\r\n File \"kivy\\_event.pyx\", line 1149, in kivy._event.EventObservers._dispatch (kivy\\_event.c:13564)\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\window\\__init__.py\", line 159, in _on_window_key_down\r\n return self.dispatch('on_key_down', keycode, text, modifiers)\r\n File \"kivy\\_event.pyx\", line 714, in kivy._event.EventDispatcher.dispatch (kivy\\_event.c:8146)\r\n File \"kivy\\_event.pyx\", line 1225, in kivy._event.EventObservers.dispatch (kivy\\_event.c:14035)\r\n File \"kivy\\_event.pyx\", line 1149, in kivy._event.EventObservers._dispatch (kivy\\_event.c:13564)\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\uix\\textinput.py\", line 2404, in 
keyboard_on_key_down\r\n self.copy()\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\uix\\textinput.py\", line 1712, in copy\r\n return Clipboard.copy(self.selection_text)\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\clipboard\\__init__.py\", line 73, in copy\r\n self._copy(data)\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\clipboard\\__init__.py\", line 87, in _copy\r\n self.put(data, self._clip_mime_type)\r\n File \"C:\\Users\\hellNo\\AppData\\Local\\Programs\\Python\\Python36-32\\lib\\site-packages\\kivy\\core\\clipboard\\clipboard_winctypes.py\", line 55, in put\r\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\r\n ValueError: embedded null character\r\n\r\n```\r\n\n", "before_files": [{"content": "'''\nClipboard windows: an implementation of the Clipboard using ctypes.\n'''\n\n__all__ = ('ClipboardWindows', )\n\nfrom kivy.utils import platform\nfrom kivy.core.clipboard import ClipboardBase\n\nif platform != 'win':\n raise SystemError('unsupported platform for Windows clipboard')\n\nimport ctypes\nfrom ctypes import wintypes\nuser32 = ctypes.windll.user32\nkernel32 = ctypes.windll.kernel32\nmsvcrt = ctypes.cdll.msvcrt\nc_char_p = ctypes.c_char_p\nc_wchar_p = ctypes.c_wchar_p\n\n\nclass ClipboardWindows(ClipboardBase):\n\n def get(self, mimetype='text/plain'):\n GetClipboardData = user32.GetClipboardData\n GetClipboardData.argtypes = [wintypes.UINT]\n GetClipboardData.restype = wintypes.HANDLE\n\n user32.OpenClipboard(user32.GetActiveWindow())\n # Standard Clipboard Format \"1\" is \"CF_TEXT\"\n pcontents = GetClipboardData(13)\n\n # if someone pastes a FILE, the content is None for SCF 13\n # and the clipboard is locked if not closed properly\n if not pcontents:\n user32.CloseClipboard()\n return ''\n data = c_wchar_p(pcontents).value.encode(self._encoding)\n user32.CloseClipboard()\n return data\n\n def put(self, text, mimetype='text/plain'):\n text = text.decode(self._encoding) # auto converted later\n text += u'\\x00'\n\n SetClipboardData = user32.SetClipboardData\n SetClipboardData.argtypes = [wintypes.UINT, wintypes.HANDLE]\n SetClipboardData.restype = wintypes.HANDLE\n\n GlobalAlloc = kernel32.GlobalAlloc\n GlobalAlloc.argtypes = [wintypes.UINT, ctypes.c_size_t]\n GlobalAlloc.restype = wintypes.HGLOBAL\n\n CF_UNICODETEXT = 13\n\n user32.OpenClipboard(user32.GetActiveWindow())\n user32.EmptyClipboard()\n hCd = GlobalAlloc(0, len(text) * ctypes.sizeof(ctypes.c_wchar))\n msvcrt.wcscpy_s(c_wchar_p(hCd), len(text), c_wchar_p(text))\n SetClipboardData(CF_UNICODETEXT, hCd)\n user32.CloseClipboard()\n\n def get_types(self):\n return ['text/plain']\n", "path": "kivy/core/clipboard/clipboard_winctypes.py"}]} | 2,323 | 195 |
gh_patches_debug_38577 | rasdani/github-patches | git_diff | praw-dev__praw-927 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hard-coded limit in stream_generator() function.
## Issue Description
When I create a subreddit read stream, I cannot configure how many items from the past to take. The value 100 is hard-coded in the stream_generator() function. As a result, if I want to start reading comments in a subreddit from the current time, I still get up to 100 previous ones and have to filter them out myself. In my opinion, this value should be passed via the kwargs of stream_generator().
## System Information
PRAW Version: 5.4.0
Python Version: 3.6
Operating System: Debian 9
</issue>
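A sketch of what the reporter is asking for; `initial_limit` is an invented keyword here, not an actual PRAW parameter, and the real implementation's de-duplication, pause handling, and exponential backoff are omitted for brevity.

```python
# Hypothetical sketch only: lets the caller cap the first request's
# backfill instead of always pulling up to 100 historical items.
def stream_generator(function, pause_after=None, initial_limit=100):
    before_fullname = None
    limit = initial_limit              # only the first request uses the cap
    while True:
        for item in reversed(list(function(
                limit=limit, params={'before': before_fullname}))):
            before_fullname = item.fullname
            yield item
        limit = 100                    # later requests behave as before
```

A caller that wants to start from "now" could then pass `initial_limit=0`; an alternative design is a boolean flag that simply discards whatever the first request returns.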
<code>
[start of praw/models/util.py]
1 """Provide helper classes used by other models."""
2 import random
3 import time
4
5
6 class BoundedSet(object):
7 """A set with a maximum size that evicts the oldest items when necessary.
8
9 This class does not implement the complete set interface.
10 """
11
12 def __init__(self, max_items):
13 """Construct an instance of the BoundedSet."""
14 self.max_items = max_items
15 self._fifo = []
16 self._set = set()
17
18 def __contains__(self, item):
19 """Test if the BoundedSet contains item."""
20 return item in self._set
21
22 def add(self, item):
23 """Add an item to the set discarding the oldest item if necessary."""
24 if len(self._set) == self.max_items:
25 self._set.remove(self._fifo.pop(0))
26 self._fifo.append(item)
27 self._set.add(item)
28
29
30 class ExponentialCounter(object):
31 """A class to provide an exponential counter with jitter."""
32
33 def __init__(self, max_counter):
34 """Initialize an instance of ExponentialCounter.
35
36 :param max_counter: The maximum base value. Note that the computed
37 value may be 3.125% higher due to jitter.
38 """
39 self._base = 1
40 self._max = max_counter
41
42 def counter(self):
43 """Increment the counter and return the current value with jitter."""
44 max_jitter = self._base / 16.
45 value = self._base + random.random() * max_jitter - max_jitter / 2
46 self._base = min(self._base * 2, self._max)
47 return value
48
49 def reset(self):
50 """Reset the counter to 1."""
51 self._base = 1
52
53
54 def permissions_string(permissions, known_permissions):
55 """Return a comma separated string of permission changes.
56
57 :param permissions: A list of strings, or ``None``. These strings can
58 exclusively contain ``+`` or ``-`` prefixes, or contain no prefixes at
59 all. When prefixed, the resulting string will simply be the joining of
60 these inputs. When not prefixed, all permissions are considered to be
61 additions, and all permissions in the ``known_permissions`` set that
62 aren't provided are considered to be removals. When None, the result is
63 ``+all``.
64 :param known_permissions: A set of strings representing the available
65 permissions.
66
67 """
68 to_set = []
69 if permissions is None:
70 to_set = ['+all']
71 else:
72 to_set = ['-all']
73 omitted = sorted(known_permissions - set(permissions))
74 to_set.extend('-{}'.format(x) for x in omitted)
75 to_set.extend('+{}'.format(x) for x in permissions)
76 return ','.join(to_set)
77
78
79 def stream_generator(function, pause_after=None):
80 """Yield new items from ListingGenerators and ``None`` when paused.
81
82 :param function: A callable that returns a ListingGenerator, e.g.
83 ``subreddit.comments`` or ``subreddit.new``.
84
85 :param pause_after: An integer representing the number of requests that
86 result in no new items before this function yields ``None``,
87 effectively introducing a pause into the stream. A negative value
88 yields ``None`` after items from a single response have been yielded,
89 regardless of number of new items obtained in that response. A value of
90 ``0`` yields ``None`` after every response resulting in no new items,
91 and a value of ``None`` never introduces a pause (default: None).
92
93 .. note:: This function internally uses an exponential delay with jitter
94 between subsequent responses that contain no new results, up to a
95 maximum delay of just over a 16 seconds. In practice that means that the
96 time before pause for ``pause_after=N+1`` is approximately twice the
97 time before pause for ``pause_after=N``.
98
99 For example, to create a stream of comment replies, try:
100
101 .. code:: python
102
103 reply_function = reddit.inbox.comment_replies
104 for reply in praw.models.util.stream_generator(reply_function):
105 print(reply)
106
107 To pause a comment stream after six responses with no new
108 comments, try:
109
110 .. code:: python
111
112 subreddit = reddit.subreddit('redditdev')
113 for comment in subreddit.stream.comments(pause_after=6):
114 if comment is None:
115 break
116 print(comment)
117
118 To resume fetching comments after a pause, try:
119
120 .. code:: python
121
122 subreddit = reddit.subreddit('help')
123 comment_stream = subreddit.stream.comments(pause_after=5)
124
125 for comment in comment_stream:
126 if comment is None:
127 break
128 print(comment)
129 # Do any other processing, then try to fetch more data
130 for comment in comment_stream:
131 if comment is None:
132 break
133 print(comment)
134
135 To bypass the internal exponential backoff, try the following. This
136 approach is useful if you are monitoring a subreddit with infrequent
137 activity, and you want the to consistently learn about new items from the
138 stream as soon as possible, rather than up to a delay of just over sixteen
139 seconds.
140
141 .. code:: python
142
143 subreddit = reddit.subreddit('help')
144 for comment in subreddit.stream.comments(pause_after=0):
145 if comment is None:
146 continue
147 print(comment)
148
149 """
150 before_fullname = None
151 exponential_counter = ExponentialCounter(max_counter=16)
152 seen_fullnames = BoundedSet(301)
153 without_before_counter = 0
154 responses_without_new = 0
155 valid_pause_after = pause_after is not None
156 while True:
157 found = False
158 newest_fullname = None
159 limit = 100
160 if before_fullname is None:
161 limit -= without_before_counter
162 without_before_counter = (without_before_counter + 1) % 30
163 for item in reversed(list(function(
164 limit=limit, params={'before': before_fullname}))):
165 if item.fullname in seen_fullnames:
166 continue
167 found = True
168 seen_fullnames.add(item.fullname)
169 newest_fullname = item.fullname
170 yield item
171 before_fullname = newest_fullname
172 if valid_pause_after and pause_after < 0:
173 yield None
174 elif found:
175 exponential_counter.reset()
176 responses_without_new = 0
177 else:
178 responses_without_new += 1
179 if valid_pause_after and responses_without_new > pause_after:
180 exponential_counter.reset()
181 responses_without_new = 0
182 yield None
183 else:
184 time.sleep(exponential_counter.counter())
185
[end of praw/models/util.py]
[start of praw/objector.py]
1 """Provides the Objector class."""
2 import re
3
4 from .exceptions import APIException
5
6
7 class Objector(object):
8 """The objector builds :class:`.RedditBase` objects."""
9
10 @staticmethod
11 def _camel_to_snake(name):
12 """Return `name` converted from camelCase to snake_case.
13
14 Code from http://stackoverflow.com/a/1176023/.
15
16 """
17 first_break_replaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
18 return re.sub(
19 '([a-z0-9])([A-Z])', r'\1_\2', first_break_replaced).lower()
20
21 @classmethod
22 def _snake_case_keys(cls, dictionary):
23 """Return a copy of dictionary with keys converted to snake_case.
24
25 :param dictionary: The dict to be corrected.
26
27 """
28 return {cls._camel_to_snake(k): v for k, v in dictionary.items()}
29
30 def __init__(self, reddit):
31 """Initialize an Objector instance.
32
33 :param reddit: An instance of :class:`~.Reddit`.
34
35 """
36 self.parsers = {}
37 self._reddit = reddit
38
39 def kind(self, instance):
40 """Return the kind from the instance class.
41
42 :param instance: An instance of a subclass of RedditBase.
43
44 """
45 for key in self.parsers:
46 if isinstance(instance, self.parsers[key]):
47 return key
48
49 def _objectify_dict(self, data):
50 """Create RedditBase objects from dicts.
51
52 :param data: The structured data, assumed to be a dict.
53 :returns: An instance of :class:`~.RedditBase`.
54
55 """
56 if ({'conversation', 'messages', 'modActions'}.issubset(data)):
57 parser = self.parsers['ModmailConversation']
58 elif {'actionTypeId', 'author', 'date'}.issubset(data):
59 # Modmail mod action
60 data = self._snake_case_keys(data)
61 parser = self.parsers['ModmailAction']
62 elif {'bodyMarkdown', 'isInternal'}.issubset(data):
63 # Modmail message
64 data = self._snake_case_keys(data)
65 parser = self.parsers['ModmailMessage']
66 elif {'isAdmin', 'isDeleted'}.issubset(data):
67 # Modmail author
68 data = self._snake_case_keys(data)
69 # Prevent clobbering base-36 id
70 del data['id']
71 data['is_subreddit_mod'] = data.pop('is_mod')
72 parser = self.parsers[self._reddit.config.kinds['redditor']]
73 elif {'banStatus', 'muteStatus', 'recentComments'}.issubset(data):
74 # Modmail user
75 data = self._snake_case_keys(data)
76 data['created_string'] = data.pop('created')
77 parser = self.parsers[self._reddit.config.kinds['redditor']]
78 elif {'displayName', 'id', 'type'}.issubset(data):
79 # Modmail subreddit
80 data = self._snake_case_keys(data)
81 parser = self.parsers[self._reddit.config.kinds[data['type']]]
82 elif ({'date', 'id', 'name'}.issubset(data)
83 or {'id', 'name', 'permissions'}.issubset(data)):
84 parser = self.parsers[self._reddit.config.kinds['redditor']]
85 else:
86 if 'user' in data:
87 parser = self.parsers[self._reddit.config.kinds['redditor']]
88 data['user'] = parser.parse({'name': data['user']},
89 self._reddit)
90 return data
91 return parser.parse(data, self._reddit)
92
93 def objectify(self, data):
94 """Create RedditBase objects from data.
95
96 :param data: The structured data.
97 :returns: An instance of :class:`~.RedditBase`, or ``None`` if
98 given ``data`` is ``None``.
99
100 """
101 # pylint: disable=too-many-return-statements
102 if data is None: # 204 no content
103 return
104 if isinstance(data, list):
105 return [self.objectify(item) for item in data]
106 if 'kind' in data and data['kind'] in self.parsers:
107 parser = self.parsers[data['kind']]
108 return parser.parse(data['data'], self._reddit)
109 elif 'json' in data and 'data' in data['json']:
110 if 'things' in data['json']['data']: # Submission.reply
111 return self.objectify(data['json']['data']['things'])
112 if 'url' in data['json']['data']: # Subreddit.submit
113 # The URL is the URL to the submission, so it's removed.
114 del data['json']['data']['url']
115 parser = self.parsers[self._reddit.config.kinds['submission']]
116 else:
117 parser = self.parsers['LiveUpdateEvent']
118 return parser.parse(data['json']['data'], self._reddit)
119 elif 'json' in data and 'errors' in data['json']:
120 errors = data['json']['errors']
121 if len(errors) == 1:
122 raise APIException(*errors[0])
123 assert not errors
124
125 elif isinstance(data, dict):
126 return self._objectify_dict(data)
127
128 return data
129
130 def register(self, kind, cls):
131 """Register a class for a given kind.
132
133 :param kind: The kind in the parsed data to map to ``cls``.
134 :param cls: A RedditBase class.
135
136 """
137 self.parsers[kind] = cls
138
[end of praw/objector.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/praw/models/util.py b/praw/models/util.py
--- a/praw/models/util.py
+++ b/praw/models/util.py
@@ -76,7 +76,7 @@
return ','.join(to_set)
-def stream_generator(function, pause_after=None):
+def stream_generator(function, pause_after=None, skip_existing=False):
"""Yield new items from ListingGenerators and ``None`` when paused.
:param function: A callable that returns a ListingGenerator, e.g.
@@ -90,6 +90,10 @@
``0`` yields ``None`` after every response resulting in no new items,
and a value of ``None`` never introduces a pause (default: None).
+ :param skip_existing: When True does not yield any results from the first
+ request thereby skipping any items that existed in the stream prior to
+ starting the stream (default: False).
+
.. note:: This function internally uses an exponential delay with jitter
between subsequent responses that contain no new results, up to a
maximum delay of just over a 16 seconds. In practice that means that the
@@ -159,8 +163,10 @@
found = True
seen_fullnames.add(item.fullname)
newest_fullname = item.fullname
- yield item
+ if not skip_existing:
+ yield item
before_fullname = newest_fullname
+ skip_existing = False
if valid_pause_after and pause_after < 0:
yield None
elif found:
diff --git a/praw/objector.py b/praw/objector.py
--- a/praw/objector.py
+++ b/praw/objector.py
@@ -42,9 +42,12 @@
:param instance: An instance of a subclass of RedditBase.
"""
+ retval = None
for key in self.parsers:
if isinstance(instance, self.parsers[key]):
- return key
+ retval = key
+ break
+ return retval
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
@@ -100,7 +103,7 @@
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
- return
+ return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if 'kind' in data and data['kind'] in self.parsers:
| {"golden_diff": "diff --git a/praw/models/util.py b/praw/models/util.py\n--- a/praw/models/util.py\n+++ b/praw/models/util.py\n@@ -76,7 +76,7 @@\n return ','.join(to_set)\n \n \n-def stream_generator(function, pause_after=None):\n+def stream_generator(function, pause_after=None, skip_existing=False):\n \"\"\"Yield new items from ListingGenerators and ``None`` when paused.\n \n :param function: A callable that returns a ListingGenerator, e.g.\n@@ -90,6 +90,10 @@\n ``0`` yields ``None`` after every response resulting in no new items,\n and a value of ``None`` never introduces a pause (default: None).\n \n+ :param skip_existing: When True does not yield any results from the first\n+ request thereby skipping any items that existed in the stream prior to\n+ starting the stream (default: False).\n+\n .. note:: This function internally uses an exponential delay with jitter\n between subsequent responses that contain no new results, up to a\n maximum delay of just over a 16 seconds. In practice that means that the\n@@ -159,8 +163,10 @@\n found = True\n seen_fullnames.add(item.fullname)\n newest_fullname = item.fullname\n- yield item\n+ if not skip_existing:\n+ yield item\n before_fullname = newest_fullname\n+ skip_existing = False\n if valid_pause_after and pause_after < 0:\n yield None\n elif found:\ndiff --git a/praw/objector.py b/praw/objector.py\n--- a/praw/objector.py\n+++ b/praw/objector.py\n@@ -42,9 +42,12 @@\n :param instance: An instance of a subclass of RedditBase.\n \n \"\"\"\n+ retval = None\n for key in self.parsers:\n if isinstance(instance, self.parsers[key]):\n- return key\n+ retval = key\n+ break\n+ return retval\n \n def _objectify_dict(self, data):\n \"\"\"Create RedditBase objects from dicts.\n@@ -100,7 +103,7 @@\n \"\"\"\n # pylint: disable=too-many-return-statements\n if data is None: # 204 no content\n- return\n+ return None\n if isinstance(data, list):\n return [self.objectify(item) for item in data]\n if 'kind' in data and data['kind'] in self.parsers:\n", "issue": "Hard-coded limit in stream_generator() function.\n## Issue Description\r\n\r\nWhen I'm create a subreddit read stream, I cannot configure how many items from the past to take. The value 100 is hard-coded in the function stream_generator(). As a result, if I want to start reading comments in subreddit from the current time, I still get up to 100 previous ones and I need to filter them. 
In my opinion, this value should be passed in the kwargs of function stream_generator().\r\n\r\n\r\n## System Information\r\n\r\n PRAW Version: 5.4.0\r\n Python Version: 3.6\r\nOperating System: Debian 9\r\n\n", "before_files": [{"content": "\"\"\"Provide helper classes used by other models.\"\"\"\nimport random\nimport time\n\n\nclass BoundedSet(object):\n \"\"\"A set with a maximum size that evicts the oldest items when necessary.\n\n This class does not implement the complete set interface.\n \"\"\"\n\n def __init__(self, max_items):\n \"\"\"Construct an instance of the BoundedSet.\"\"\"\n self.max_items = max_items\n self._fifo = []\n self._set = set()\n\n def __contains__(self, item):\n \"\"\"Test if the BoundedSet contains item.\"\"\"\n return item in self._set\n\n def add(self, item):\n \"\"\"Add an item to the set discarding the oldest item if necessary.\"\"\"\n if len(self._set) == self.max_items:\n self._set.remove(self._fifo.pop(0))\n self._fifo.append(item)\n self._set.add(item)\n\n\nclass ExponentialCounter(object):\n \"\"\"A class to provide an exponential counter with jitter.\"\"\"\n\n def __init__(self, max_counter):\n \"\"\"Initialize an instance of ExponentialCounter.\n\n :param max_counter: The maximum base value. Note that the computed\n value may be 3.125% higher due to jitter.\n \"\"\"\n self._base = 1\n self._max = max_counter\n\n def counter(self):\n \"\"\"Increment the counter and return the current value with jitter.\"\"\"\n max_jitter = self._base / 16.\n value = self._base + random.random() * max_jitter - max_jitter / 2\n self._base = min(self._base * 2, self._max)\n return value\n\n def reset(self):\n \"\"\"Reset the counter to 1.\"\"\"\n self._base = 1\n\n\ndef permissions_string(permissions, known_permissions):\n \"\"\"Return a comma separated string of permission changes.\n\n :param permissions: A list of strings, or ``None``. These strings can\n exclusively contain ``+`` or ``-`` prefixes, or contain no prefixes at\n all. When prefixed, the resulting string will simply be the joining of\n these inputs. When not prefixed, all permissions are considered to be\n additions, and all permissions in the ``known_permissions`` set that\n aren't provided are considered to be removals. When None, the result is\n ``+all``.\n :param known_permissions: A set of strings representing the available\n permissions.\n\n \"\"\"\n to_set = []\n if permissions is None:\n to_set = ['+all']\n else:\n to_set = ['-all']\n omitted = sorted(known_permissions - set(permissions))\n to_set.extend('-{}'.format(x) for x in omitted)\n to_set.extend('+{}'.format(x) for x in permissions)\n return ','.join(to_set)\n\n\ndef stream_generator(function, pause_after=None):\n \"\"\"Yield new items from ListingGenerators and ``None`` when paused.\n\n :param function: A callable that returns a ListingGenerator, e.g.\n ``subreddit.comments`` or ``subreddit.new``.\n\n :param pause_after: An integer representing the number of requests that\n result in no new items before this function yields ``None``,\n effectively introducing a pause into the stream. A negative value\n yields ``None`` after items from a single response have been yielded,\n regardless of number of new items obtained in that response. A value of\n ``0`` yields ``None`` after every response resulting in no new items,\n and a value of ``None`` never introduces a pause (default: None).\n\n .. 
note:: This function internally uses an exponential delay with jitter\n between subsequent responses that contain no new results, up to a\n maximum delay of just over a 16 seconds. In practice that means that the\n time before pause for ``pause_after=N+1`` is approximately twice the\n time before pause for ``pause_after=N``.\n\n For example, to create a stream of comment replies, try:\n\n .. code:: python\n\n reply_function = reddit.inbox.comment_replies\n for reply in praw.models.util.stream_generator(reply_function):\n print(reply)\n\n To pause a comment stream after six responses with no new\n comments, try:\n\n .. code:: python\n\n subreddit = reddit.subreddit('redditdev')\n for comment in subreddit.stream.comments(pause_after=6):\n if comment is None:\n break\n print(comment)\n\n To resume fetching comments after a pause, try:\n\n .. code:: python\n\n subreddit = reddit.subreddit('help')\n comment_stream = subreddit.stream.comments(pause_after=5)\n\n for comment in comment_stream:\n if comment is None:\n break\n print(comment)\n # Do any other processing, then try to fetch more data\n for comment in comment_stream:\n if comment is None:\n break\n print(comment)\n\n To bypass the internal exponential backoff, try the following. This\n approach is useful if you are monitoring a subreddit with infrequent\n activity, and you want the to consistently learn about new items from the\n stream as soon as possible, rather than up to a delay of just over sixteen\n seconds.\n\n .. code:: python\n\n subreddit = reddit.subreddit('help')\n for comment in subreddit.stream.comments(pause_after=0):\n if comment is None:\n continue\n print(comment)\n\n \"\"\"\n before_fullname = None\n exponential_counter = ExponentialCounter(max_counter=16)\n seen_fullnames = BoundedSet(301)\n without_before_counter = 0\n responses_without_new = 0\n valid_pause_after = pause_after is not None\n while True:\n found = False\n newest_fullname = None\n limit = 100\n if before_fullname is None:\n limit -= without_before_counter\n without_before_counter = (without_before_counter + 1) % 30\n for item in reversed(list(function(\n limit=limit, params={'before': before_fullname}))):\n if item.fullname in seen_fullnames:\n continue\n found = True\n seen_fullnames.add(item.fullname)\n newest_fullname = item.fullname\n yield item\n before_fullname = newest_fullname\n if valid_pause_after and pause_after < 0:\n yield None\n elif found:\n exponential_counter.reset()\n responses_without_new = 0\n else:\n responses_without_new += 1\n if valid_pause_after and responses_without_new > pause_after:\n exponential_counter.reset()\n responses_without_new = 0\n yield None\n else:\n time.sleep(exponential_counter.counter())\n", "path": "praw/models/util.py"}, {"content": "\"\"\"Provides the Objector class.\"\"\"\nimport re\n\nfrom .exceptions import APIException\n\n\nclass Objector(object):\n \"\"\"The objector builds :class:`.RedditBase` objects.\"\"\"\n\n @staticmethod\n def _camel_to_snake(name):\n \"\"\"Return `name` converted from camelCase to snake_case.\n\n Code from http://stackoverflow.com/a/1176023/.\n\n \"\"\"\n first_break_replaced = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub(\n '([a-z0-9])([A-Z])', r'\\1_\\2', first_break_replaced).lower()\n\n @classmethod\n def _snake_case_keys(cls, dictionary):\n \"\"\"Return a copy of dictionary with keys converted to snake_case.\n\n :param dictionary: The dict to be corrected.\n\n \"\"\"\n return {cls._camel_to_snake(k): v for k, v in dictionary.items()}\n\n def __init__(self, 
reddit):\n \"\"\"Initialize an Objector instance.\n\n :param reddit: An instance of :class:`~.Reddit`.\n\n \"\"\"\n self.parsers = {}\n self._reddit = reddit\n\n def kind(self, instance):\n \"\"\"Return the kind from the instance class.\n\n :param instance: An instance of a subclass of RedditBase.\n\n \"\"\"\n for key in self.parsers:\n if isinstance(instance, self.parsers[key]):\n return key\n\n def _objectify_dict(self, data):\n \"\"\"Create RedditBase objects from dicts.\n\n :param data: The structured data, assumed to be a dict.\n :returns: An instance of :class:`~.RedditBase`.\n\n \"\"\"\n if ({'conversation', 'messages', 'modActions'}.issubset(data)):\n parser = self.parsers['ModmailConversation']\n elif {'actionTypeId', 'author', 'date'}.issubset(data):\n # Modmail mod action\n data = self._snake_case_keys(data)\n parser = self.parsers['ModmailAction']\n elif {'bodyMarkdown', 'isInternal'}.issubset(data):\n # Modmail message\n data = self._snake_case_keys(data)\n parser = self.parsers['ModmailMessage']\n elif {'isAdmin', 'isDeleted'}.issubset(data):\n # Modmail author\n data = self._snake_case_keys(data)\n # Prevent clobbering base-36 id\n del data['id']\n data['is_subreddit_mod'] = data.pop('is_mod')\n parser = self.parsers[self._reddit.config.kinds['redditor']]\n elif {'banStatus', 'muteStatus', 'recentComments'}.issubset(data):\n # Modmail user\n data = self._snake_case_keys(data)\n data['created_string'] = data.pop('created')\n parser = self.parsers[self._reddit.config.kinds['redditor']]\n elif {'displayName', 'id', 'type'}.issubset(data):\n # Modmail subreddit\n data = self._snake_case_keys(data)\n parser = self.parsers[self._reddit.config.kinds[data['type']]]\n elif ({'date', 'id', 'name'}.issubset(data)\n or {'id', 'name', 'permissions'}.issubset(data)):\n parser = self.parsers[self._reddit.config.kinds['redditor']]\n else:\n if 'user' in data:\n parser = self.parsers[self._reddit.config.kinds['redditor']]\n data['user'] = parser.parse({'name': data['user']},\n self._reddit)\n return data\n return parser.parse(data, self._reddit)\n\n def objectify(self, data):\n \"\"\"Create RedditBase objects from data.\n\n :param data: The structured data.\n :returns: An instance of :class:`~.RedditBase`, or ``None`` if\n given ``data`` is ``None``.\n\n \"\"\"\n # pylint: disable=too-many-return-statements\n if data is None: # 204 no content\n return\n if isinstance(data, list):\n return [self.objectify(item) for item in data]\n if 'kind' in data and data['kind'] in self.parsers:\n parser = self.parsers[data['kind']]\n return parser.parse(data['data'], self._reddit)\n elif 'json' in data and 'data' in data['json']:\n if 'things' in data['json']['data']: # Submission.reply\n return self.objectify(data['json']['data']['things'])\n if 'url' in data['json']['data']: # Subreddit.submit\n # The URL is the URL to the submission, so it's removed.\n del data['json']['data']['url']\n parser = self.parsers[self._reddit.config.kinds['submission']]\n else:\n parser = self.parsers['LiveUpdateEvent']\n return parser.parse(data['json']['data'], self._reddit)\n elif 'json' in data and 'errors' in data['json']:\n errors = data['json']['errors']\n if len(errors) == 1:\n raise APIException(*errors[0])\n assert not errors\n\n elif isinstance(data, dict):\n return self._objectify_dict(data)\n\n return data\n\n def register(self, kind, cls):\n \"\"\"Register a class for a given kind.\n\n :param kind: The kind in the parsed data to map to ``cls``.\n :param cls: A RedditBase class.\n\n \"\"\"\n self.parsers[kind] = 
cls\n", "path": "praw/objector.py"}]} | 4,074 | 550 |
gh_patches_debug_17295 | rasdani/github-patches | git_diff | cloudtools__troposphere-844 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
S3 Bucket constructor
**Expected result:** troposphere.s3.Bucket constructor accepts `template` as a second parameter.
**Actual result:** troposphere.s3.Bucket constructor does not accept `template` as a second parameter.
Example of code that should work:
```python
template = Template()
Bucket(
'PlaylistsFailoverS3Bucket',
template,
AccessControl=Private,
BucketName="test-bucket",
)
```
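
For reference, a workaround that does work with the current constructor is to build the bucket first and attach it explicitly. This is only a sketch and assumes the standard `Template.add_resource` API; it is not the behaviour being requested here.

```python
from troposphere import Template
from troposphere.s3 import Bucket, Private

template = Template()

# Create the bucket without the positional template argument,
# then register it on the template explicitly.
bucket = Bucket(
    'PlaylistsFailoverS3Bucket',
    AccessControl=Private,
    BucketName="test-bucket",
)
template.add_resource(bucket)
```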
</issue>
<code>
[start of troposphere/s3.py]
1 # Copyright (c) 2013, Bob Van Zant <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5 import warnings
6
7 from . import AWSObject, AWSProperty, Tags
8 from .validators import positive_integer, s3_bucket_name
9 from .validators import s3_transfer_acceleration_status
10
11 try:
12 from awacs.aws import Policy
13
14 policytypes = (dict, Policy)
15 except ImportError:
16 policytypes = dict,
17
18 Private = "Private"
19 PublicRead = "PublicRead"
20 PublicReadWrite = "PublicReadWrite"
21 AuthenticatedRead = "AuthenticatedRead"
22 BucketOwnerRead = "BucketOwnerRead"
23 BucketOwnerFullControl = "BucketOwnerFullControl"
24 LogDeliveryWrite = "LogDeliveryWrite"
25
26
27 class CorsRules(AWSProperty):
28 props = {
29 'AllowedHeaders': ([basestring], False),
30 'AllowedMethods': ([basestring], True),
31 'AllowedOrigins': ([basestring], True),
32 'ExposedHeaders': ([basestring], False),
33 'Id': (basestring, False),
34 'MaxAge': (positive_integer, False),
35 }
36
37
38 class CorsConfiguration(AWSProperty):
39 props = {
40 'CorsRules': ([CorsRules], True),
41 }
42
43
44 class VersioningConfiguration(AWSProperty):
45 props = {
46 'Status': (basestring, False),
47 }
48
49
50 class AccelerateConfiguration(AWSProperty):
51 props = {
52 'AccelerationStatus': (s3_transfer_acceleration_status, True),
53 }
54
55
56 class RedirectAllRequestsTo(AWSProperty):
57 props = {
58 'HostName': (basestring, True),
59 'Protocol': (basestring, False),
60 }
61
62
63 class RedirectRule(AWSProperty):
64 props = {
65 'HostName': (basestring, False),
66 'HttpRedirectCode': (basestring, False),
67 'Protocol': (basestring, False),
68 'ReplaceKeyPrefixWith': (basestring, False),
69 'ReplaceKeyWith': (basestring, False),
70 }
71
72
73 class RoutingRuleCondition(AWSProperty):
74 props = {
75 'HttpErrorCodeReturnedEquals': (basestring, False),
76 'KeyPrefixEquals': (basestring, False),
77 }
78
79
80 class RoutingRule(AWSProperty):
81 props = {
82 'RedirectRule': (RedirectRule, True),
83 'RoutingRuleCondition': (RoutingRuleCondition, False),
84 }
85
86
87 class WebsiteConfiguration(AWSProperty):
88 props = {
89 'IndexDocument': (basestring, False),
90 'ErrorDocument': (basestring, False),
91 'RedirectAllRequestsTo': (RedirectAllRequestsTo, False),
92 'RoutingRules': ([RoutingRule], False),
93 }
94
95
96 class LifecycleRuleTransition(AWSProperty):
97 props = {
98 'StorageClass': (basestring, True),
99 'TransitionDate': (basestring, False),
100 'TransitionInDays': (positive_integer, False),
101 }
102
103
104 class AbortIncompleteMultipartUpload(AWSProperty):
105 props = {
106 'DaysAfterInitiation': (positive_integer, True),
107 }
108
109
110 class NoncurrentVersionTransition(AWSProperty):
111 props = {
112 'StorageClass': (basestring, True),
113 'TransitionInDays': (positive_integer, True),
114 }
115
116
117 class TagFilter(AWSProperty):
118 props = {
119 'Key': (basestring, True),
120 'Value': (basestring, True),
121 }
122
123
124 class LifecycleRule(AWSProperty):
125 props = {
126 'AbortIncompleteMultipartUpload':
127 (AbortIncompleteMultipartUpload, False),
128 'ExpirationDate': (basestring, False),
129 'ExpirationInDays': (positive_integer, False),
130 'Id': (basestring, False),
131 'NoncurrentVersionExpirationInDays': (positive_integer, False),
132 'NoncurrentVersionTransition': (NoncurrentVersionTransition, False),
133 'NoncurrentVersionTransitions': ([NoncurrentVersionTransition], False),
134 'Prefix': (basestring, False),
135 'Status': (basestring, True),
136 'TagFilters': ([TagFilter], False),
137 'Transition': (LifecycleRuleTransition, False),
138 'Transitions': ([LifecycleRuleTransition], False)
139 }
140
141 def validate(self):
142 if 'Transition' in self.properties:
143 if 'Transitions' not in self.properties:
144 # aws moved from a single transition to a list of them
145 # and deprecated 'Transition', so let's just move it to
146 # the new property and not annoy the user.
147 self.properties['Transitions'] = [
148 self.properties.pop('Transition')]
149 else:
150 raise ValueError(
151 'Cannot specify both "Transition" and "Transitions" '
152 'properties on S3 Bucket Lifecycle Rule. Please use '
153 '"Transitions" since the former has been deprecated.')
154
155 if 'NoncurrentVersionTransition' in self.properties:
156 if 'NoncurrentVersionTransitions' not in self.properties:
157 warnings.warn(
158 'NoncurrentVersionTransition has been deprecated in '
159 'favour of NoncurrentVersionTransitions.'
160 )
161 # Translate the old transition format to the new format
162 self.properties['NoncurrentVersionTransitions'] = [
163 self.properties.pop('NoncurrentVersionTransition')]
164 else:
165 raise ValueError(
166 'Cannot specify both "NoncurrentVersionTransition" and '
167 '"NoncurrentVersionTransitions" properties on S3 Bucket '
168 'Lifecycle Rule. Please use '
169 '"NoncurrentVersionTransitions" since the former has been '
170 'deprecated.')
171
172 if 'ExpirationInDays' in self.properties and 'ExpirationDate' in \
173 self.properties:
174 raise ValueError(
175 'Cannot specify both "ExpirationDate" and "ExpirationInDays"'
176 )
177
178
179 class LifecycleConfiguration(AWSProperty):
180 props = {
181 'Rules': ([LifecycleRule], True),
182 }
183
184
185 class LoggingConfiguration(AWSProperty):
186 props = {
187 'DestinationBucketName': (s3_bucket_name, False),
188 'LogFilePrefix': (basestring, False),
189 }
190
191
192 class Rules(AWSProperty):
193 props = {
194 'Name': (basestring, True),
195 'Value': (basestring, True)
196 }
197
198
199 class S3Key(AWSProperty):
200 props = {
201 'Rules': ([Rules], True)
202 }
203
204
205 class Filter(AWSProperty):
206 props = {
207 'S3Key': (S3Key, True)
208 }
209
210
211 class LambdaConfigurations(AWSProperty):
212 props = {
213 'Event': (basestring, True),
214 'Filter': (Filter, False),
215 'Function': (basestring, True),
216 }
217
218
219 class QueueConfigurations(AWSProperty):
220 props = {
221 'Event': (basestring, True),
222 'Filter': (Filter, False),
223 'Queue': (basestring, True),
224 }
225
226
227 class TopicConfigurations(AWSProperty):
228 props = {
229 'Event': (basestring, True),
230 'Filter': (Filter, False),
231 'Topic': (basestring, True),
232 }
233
234
235 class MetricsConfiguration(AWSProperty):
236 props = {
237 'Id': (basestring, True),
238 'Prefix': (basestring, False),
239 'TagFilters': ([TagFilter], False),
240 }
241
242
243 class NotificationConfiguration(AWSProperty):
244 props = {
245 'LambdaConfigurations': ([LambdaConfigurations], False),
246 'QueueConfigurations': ([QueueConfigurations], False),
247 'TopicConfigurations': ([TopicConfigurations], False),
248 }
249
250
251 class ReplicationConfigurationRulesDestination(AWSProperty):
252 props = {
253 'Bucket': (basestring, True),
254 'StorageClass': (basestring, False)
255 }
256
257
258 class ReplicationConfigurationRules(AWSProperty):
259 props = {
260 'Destination': (ReplicationConfigurationRulesDestination, True),
261 'Id': (basestring, False),
262 'Prefix': (basestring, True),
263 'Status': (basestring, True)
264 }
265
266
267 class ReplicationConfiguration(AWSProperty):
268 props = {
269 'Role': (basestring, True),
270 'Rules': ([ReplicationConfigurationRules], True)
271 }
272
273
274 class Bucket(AWSObject):
275 resource_type = "AWS::S3::Bucket"
276
277 props = {
278 'AccessControl': (basestring, False),
279 'AccelerateConfiguration': (AccelerateConfiguration, False),
280 'BucketName': (s3_bucket_name, False),
281 'CorsConfiguration': (CorsConfiguration, False),
282 'LifecycleConfiguration': (LifecycleConfiguration, False),
283 'LoggingConfiguration': (LoggingConfiguration, False),
284 'MetricsConfigurations': ([MetricsConfiguration], False),
285 'NotificationConfiguration': (NotificationConfiguration, False),
286 'ReplicationConfiguration': (ReplicationConfiguration, False),
287 'Tags': (Tags, False),
288 'WebsiteConfiguration': (WebsiteConfiguration, False),
289 'VersioningConfiguration': (VersioningConfiguration, False)
290 }
291
292 access_control_types = [
293 Private,
294 PublicRead,
295 PublicReadWrite,
296 AuthenticatedRead,
297 BucketOwnerRead,
298 BucketOwnerFullControl,
299 LogDeliveryWrite,
300 ]
301
302 def __init__(self, name=None, **kwargs):
303
304 # note: 'name' is the resource title, not the bucket name
305
306 if not name and 'title' in kwargs:
307 name = kwargs.pop('title')
308 if not name:
309 raise TypeError("You must provide a title for the bucket resource")
310 super(Bucket, self).__init__(name, **kwargs)
311
312 if 'AccessControl' in kwargs and \
313 isinstance(kwargs['AccessControl'], basestring):
314 if kwargs['AccessControl'] not in self.access_control_types:
315 raise ValueError('AccessControl must be one of "%s"' % (
316 ', '.join(self.access_control_types)))
317
318
319 class BucketPolicy(AWSObject):
320 resource_type = "AWS::S3::BucketPolicy"
321
322 props = {
323 'Bucket': (basestring, True),
324 'PolicyDocument': (policytypes, True),
325 }
326
[end of troposphere/s3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/s3.py b/troposphere/s3.py
--- a/troposphere/s3.py
+++ b/troposphere/s3.py
@@ -299,19 +299,10 @@
LogDeliveryWrite,
]
- def __init__(self, name=None, **kwargs):
-
- # note: 'name' is the resource title, not the bucket name
-
- if not name and 'title' in kwargs:
- name = kwargs.pop('title')
- if not name:
- raise TypeError("You must provide a title for the bucket resource")
- super(Bucket, self).__init__(name, **kwargs)
-
- if 'AccessControl' in kwargs and \
- isinstance(kwargs['AccessControl'], basestring):
- if kwargs['AccessControl'] not in self.access_control_types:
+ def validate(self):
+ access_control = self.properties.get('AccessControl')
+ if access_control is not None:
+ if access_control not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
| {"golden_diff": "diff --git a/troposphere/s3.py b/troposphere/s3.py\n--- a/troposphere/s3.py\n+++ b/troposphere/s3.py\n@@ -299,19 +299,10 @@\n LogDeliveryWrite,\n ]\n \n- def __init__(self, name=None, **kwargs):\n-\n- # note: 'name' is the resource title, not the bucket name\n-\n- if not name and 'title' in kwargs:\n- name = kwargs.pop('title')\n- if not name:\n- raise TypeError(\"You must provide a title for the bucket resource\")\n- super(Bucket, self).__init__(name, **kwargs)\n-\n- if 'AccessControl' in kwargs and \\\n- isinstance(kwargs['AccessControl'], basestring):\n- if kwargs['AccessControl'] not in self.access_control_types:\n+ def validate(self):\n+ access_control = self.properties.get('AccessControl')\n+ if access_control is not None:\n+ if access_control not in self.access_control_types:\n raise ValueError('AccessControl must be one of \"%s\"' % (\n ', '.join(self.access_control_types)))\n", "issue": "S3 Bucket constructor\n**Expected result:** troposphere.s3.Bucket constructor accepts `template` as a second parameter.\r\n**Actual result:** troposphere.s3.Bucket constructor does not accept `template` as a second parameter.\r\n\r\nExample of code that should work:\r\n```python\r\ntemplate = Template()\r\nBucket(\r\n 'PlaylistsFailoverS3Bucket',\r\n template,\r\n AccessControl=Private,\r\n BucketName=\"test-bucket\",\r\n)\r\n```\n", "before_files": [{"content": "# Copyright (c) 2013, Bob Van Zant <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\nimport warnings\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import positive_integer, s3_bucket_name\nfrom .validators import s3_transfer_acceleration_status\n\ntry:\n from awacs.aws import Policy\n\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\nPrivate = \"Private\"\nPublicRead = \"PublicRead\"\nPublicReadWrite = \"PublicReadWrite\"\nAuthenticatedRead = \"AuthenticatedRead\"\nBucketOwnerRead = \"BucketOwnerRead\"\nBucketOwnerFullControl = \"BucketOwnerFullControl\"\nLogDeliveryWrite = \"LogDeliveryWrite\"\n\n\nclass CorsRules(AWSProperty):\n props = {\n 'AllowedHeaders': ([basestring], False),\n 'AllowedMethods': ([basestring], True),\n 'AllowedOrigins': ([basestring], True),\n 'ExposedHeaders': ([basestring], False),\n 'Id': (basestring, False),\n 'MaxAge': (positive_integer, False),\n }\n\n\nclass CorsConfiguration(AWSProperty):\n props = {\n 'CorsRules': ([CorsRules], True),\n }\n\n\nclass VersioningConfiguration(AWSProperty):\n props = {\n 'Status': (basestring, False),\n }\n\n\nclass AccelerateConfiguration(AWSProperty):\n props = {\n 'AccelerationStatus': (s3_transfer_acceleration_status, True),\n }\n\n\nclass RedirectAllRequestsTo(AWSProperty):\n props = {\n 'HostName': (basestring, True),\n 'Protocol': (basestring, False),\n }\n\n\nclass RedirectRule(AWSProperty):\n props = {\n 'HostName': (basestring, False),\n 'HttpRedirectCode': (basestring, False),\n 'Protocol': (basestring, False),\n 'ReplaceKeyPrefixWith': (basestring, False),\n 'ReplaceKeyWith': (basestring, False),\n }\n\n\nclass RoutingRuleCondition(AWSProperty):\n props = {\n 'HttpErrorCodeReturnedEquals': (basestring, False),\n 'KeyPrefixEquals': (basestring, False),\n }\n\n\nclass RoutingRule(AWSProperty):\n props = {\n 'RedirectRule': (RedirectRule, True),\n 'RoutingRuleCondition': (RoutingRuleCondition, False),\n }\n\n\nclass WebsiteConfiguration(AWSProperty):\n props = {\n 'IndexDocument': (basestring, False),\n 'ErrorDocument': (basestring, False),\n 'RedirectAllRequestsTo': 
(RedirectAllRequestsTo, False),\n 'RoutingRules': ([RoutingRule], False),\n }\n\n\nclass LifecycleRuleTransition(AWSProperty):\n props = {\n 'StorageClass': (basestring, True),\n 'TransitionDate': (basestring, False),\n 'TransitionInDays': (positive_integer, False),\n }\n\n\nclass AbortIncompleteMultipartUpload(AWSProperty):\n props = {\n 'DaysAfterInitiation': (positive_integer, True),\n }\n\n\nclass NoncurrentVersionTransition(AWSProperty):\n props = {\n 'StorageClass': (basestring, True),\n 'TransitionInDays': (positive_integer, True),\n }\n\n\nclass TagFilter(AWSProperty):\n props = {\n 'Key': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass LifecycleRule(AWSProperty):\n props = {\n 'AbortIncompleteMultipartUpload':\n (AbortIncompleteMultipartUpload, False),\n 'ExpirationDate': (basestring, False),\n 'ExpirationInDays': (positive_integer, False),\n 'Id': (basestring, False),\n 'NoncurrentVersionExpirationInDays': (positive_integer, False),\n 'NoncurrentVersionTransition': (NoncurrentVersionTransition, False),\n 'NoncurrentVersionTransitions': ([NoncurrentVersionTransition], False),\n 'Prefix': (basestring, False),\n 'Status': (basestring, True),\n 'TagFilters': ([TagFilter], False),\n 'Transition': (LifecycleRuleTransition, False),\n 'Transitions': ([LifecycleRuleTransition], False)\n }\n\n def validate(self):\n if 'Transition' in self.properties:\n if 'Transitions' not in self.properties:\n # aws moved from a single transition to a list of them\n # and deprecated 'Transition', so let's just move it to\n # the new property and not annoy the user.\n self.properties['Transitions'] = [\n self.properties.pop('Transition')]\n else:\n raise ValueError(\n 'Cannot specify both \"Transition\" and \"Transitions\" '\n 'properties on S3 Bucket Lifecycle Rule. Please use '\n '\"Transitions\" since the former has been deprecated.')\n\n if 'NoncurrentVersionTransition' in self.properties:\n if 'NoncurrentVersionTransitions' not in self.properties:\n warnings.warn(\n 'NoncurrentVersionTransition has been deprecated in '\n 'favour of NoncurrentVersionTransitions.'\n )\n # Translate the old transition format to the new format\n self.properties['NoncurrentVersionTransitions'] = [\n self.properties.pop('NoncurrentVersionTransition')]\n else:\n raise ValueError(\n 'Cannot specify both \"NoncurrentVersionTransition\" and '\n '\"NoncurrentVersionTransitions\" properties on S3 Bucket '\n 'Lifecycle Rule. 
Please use '\n '\"NoncurrentVersionTransitions\" since the former has been '\n 'deprecated.')\n\n if 'ExpirationInDays' in self.properties and 'ExpirationDate' in \\\n self.properties:\n raise ValueError(\n 'Cannot specify both \"ExpirationDate\" and \"ExpirationInDays\"'\n )\n\n\nclass LifecycleConfiguration(AWSProperty):\n props = {\n 'Rules': ([LifecycleRule], True),\n }\n\n\nclass LoggingConfiguration(AWSProperty):\n props = {\n 'DestinationBucketName': (s3_bucket_name, False),\n 'LogFilePrefix': (basestring, False),\n }\n\n\nclass Rules(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Value': (basestring, True)\n }\n\n\nclass S3Key(AWSProperty):\n props = {\n 'Rules': ([Rules], True)\n }\n\n\nclass Filter(AWSProperty):\n props = {\n 'S3Key': (S3Key, True)\n }\n\n\nclass LambdaConfigurations(AWSProperty):\n props = {\n 'Event': (basestring, True),\n 'Filter': (Filter, False),\n 'Function': (basestring, True),\n }\n\n\nclass QueueConfigurations(AWSProperty):\n props = {\n 'Event': (basestring, True),\n 'Filter': (Filter, False),\n 'Queue': (basestring, True),\n }\n\n\nclass TopicConfigurations(AWSProperty):\n props = {\n 'Event': (basestring, True),\n 'Filter': (Filter, False),\n 'Topic': (basestring, True),\n }\n\n\nclass MetricsConfiguration(AWSProperty):\n props = {\n 'Id': (basestring, True),\n 'Prefix': (basestring, False),\n 'TagFilters': ([TagFilter], False),\n }\n\n\nclass NotificationConfiguration(AWSProperty):\n props = {\n 'LambdaConfigurations': ([LambdaConfigurations], False),\n 'QueueConfigurations': ([QueueConfigurations], False),\n 'TopicConfigurations': ([TopicConfigurations], False),\n }\n\n\nclass ReplicationConfigurationRulesDestination(AWSProperty):\n props = {\n 'Bucket': (basestring, True),\n 'StorageClass': (basestring, False)\n }\n\n\nclass ReplicationConfigurationRules(AWSProperty):\n props = {\n 'Destination': (ReplicationConfigurationRulesDestination, True),\n 'Id': (basestring, False),\n 'Prefix': (basestring, True),\n 'Status': (basestring, True)\n }\n\n\nclass ReplicationConfiguration(AWSProperty):\n props = {\n 'Role': (basestring, True),\n 'Rules': ([ReplicationConfigurationRules], True)\n }\n\n\nclass Bucket(AWSObject):\n resource_type = \"AWS::S3::Bucket\"\n\n props = {\n 'AccessControl': (basestring, False),\n 'AccelerateConfiguration': (AccelerateConfiguration, False),\n 'BucketName': (s3_bucket_name, False),\n 'CorsConfiguration': (CorsConfiguration, False),\n 'LifecycleConfiguration': (LifecycleConfiguration, False),\n 'LoggingConfiguration': (LoggingConfiguration, False),\n 'MetricsConfigurations': ([MetricsConfiguration], False),\n 'NotificationConfiguration': (NotificationConfiguration, False),\n 'ReplicationConfiguration': (ReplicationConfiguration, False),\n 'Tags': (Tags, False),\n 'WebsiteConfiguration': (WebsiteConfiguration, False),\n 'VersioningConfiguration': (VersioningConfiguration, False)\n }\n\n access_control_types = [\n Private,\n PublicRead,\n PublicReadWrite,\n AuthenticatedRead,\n BucketOwnerRead,\n BucketOwnerFullControl,\n LogDeliveryWrite,\n ]\n\n def __init__(self, name=None, **kwargs):\n\n # note: 'name' is the resource title, not the bucket name\n\n if not name and 'title' in kwargs:\n name = kwargs.pop('title')\n if not name:\n raise TypeError(\"You must provide a title for the bucket resource\")\n super(Bucket, self).__init__(name, **kwargs)\n\n if 'AccessControl' in kwargs and \\\n isinstance(kwargs['AccessControl'], basestring):\n if kwargs['AccessControl'] not in self.access_control_types:\n raise 
ValueError('AccessControl must be one of \"%s\"' % (\n ', '.join(self.access_control_types)))\n\n\nclass BucketPolicy(AWSObject):\n resource_type = \"AWS::S3::BucketPolicy\"\n\n props = {\n 'Bucket': (basestring, True),\n 'PolicyDocument': (policytypes, True),\n }\n", "path": "troposphere/s3.py"}]} | 3,641 | 252 |
gh_patches_debug_9245 | rasdani/github-patches | git_diff | translate__pootle-6371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fulah locale exposes _XXX_LAST_SUBMISSION_
When in the Fulah (ff) locale, all pages expose the `_XXX_LAST_SUBMISSION_` text used to mangle timesince messages.

</issue>
<code>
[start of pootle/i18n/dates.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from datetime import datetime
10
11
12 from .formatter import get_locale_formats
13
14
15 def timesince(timestamp, locale=None):
16 timedelta = datetime.now() - datetime.fromtimestamp(timestamp)
17 return get_locale_formats(locale).timedelta(timedelta, format='long')
18
[end of pootle/i18n/dates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py
--- a/pootle/i18n/dates.py
+++ b/pootle/i18n/dates.py
@@ -8,10 +8,15 @@
from datetime import datetime
+from django.conf import settings
from .formatter import get_locale_formats
def timesince(timestamp, locale=None):
timedelta = datetime.now() - datetime.fromtimestamp(timestamp)
- return get_locale_formats(locale).timedelta(timedelta, format='long')
+ formatted = get_locale_formats(locale).timedelta(timedelta, format='long')
+ if formatted:
+ return formatted
+ return get_locale_formats(
+ settings.LANGUAGE_CODE).timedelta(timedelta, format='long')
| {"golden_diff": "diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py\n--- a/pootle/i18n/dates.py\n+++ b/pootle/i18n/dates.py\n@@ -8,10 +8,15 @@\n \n from datetime import datetime\n \n+from django.conf import settings\n \n from .formatter import get_locale_formats\n \n \n def timesince(timestamp, locale=None):\n timedelta = datetime.now() - datetime.fromtimestamp(timestamp)\n- return get_locale_formats(locale).timedelta(timedelta, format='long')\n+ formatted = get_locale_formats(locale).timedelta(timedelta, format='long')\n+ if formatted:\n+ return formatted\n+ return get_locale_formats(\n+ settings.LANGUAGE_CODE).timedelta(timedelta, format='long')\n", "issue": "Fulah locale exposes _XXX_LAST_SUBMISSION_\nWhen in the Fulah (ff) locale, all pages expose the `_XXX_LAST_SUBMISSION_` text used to mangle timesince messages.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom datetime import datetime\n\n\nfrom .formatter import get_locale_formats\n\n\ndef timesince(timestamp, locale=None):\n timedelta = datetime.now() - datetime.fromtimestamp(timestamp)\n return get_locale_formats(locale).timedelta(timedelta, format='long')\n", "path": "pootle/i18n/dates.py"}]} | 821 | 177 |
gh_patches_debug_20316 | rasdani/github-patches | git_diff | gammapy__gammapy-5149 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strange warning when running the `SensitivityEstimator`
**Gammapy version**
Current main:
```
Gammapy support for parallelisation with ray is still a prototype and is not fully functional.
System:
python_executable : /home/maxnoe/.local/conda/envs/gammapy-dev/bin/python3.9
python_version : 3.9.16
machine : x86_64
system : Linux
Gammapy package:
version : 1.2.dev201+g514451881.d20230627
path : /home/maxnoe/Projects/gammapy/gammapy
Other packages:
numpy : 1.25.0
scipy : 1.11.0
astropy : 5.3
regions : 0.7
click : 8.1.3
yaml : 6.0
IPython : 8.14.0
jupyterlab : 3.5.3
matplotlib : 3.7.1
pandas : 2.0.2
healpy : 1.16.2
iminuit : 2.22.0
sherpa : 4.15.1
naima : 0.10.0
emcee : 3.1.4
corner : 2.2.2
ray : 2.5.1
Gammapy environment variables:
GAMMAPY_DATA : /home/maxnoe/Projects/gammapy/gammapy-datasets/dev
```
**Bug description**
When running the `SensitivityEstimator` there is a warning I don't understand; it seems to be outside of my control and is also worded a bit strangely.
I guess this should be a `GammapyDeprecationWarning` and not use the logging system.
> Table column name energy will be deprecated by e_ref since v1.2
What is the intention here? Will the `energy` column be removed? Would a better message then be:
> The column "energy" is deprecated as of Gammapy 1.x and will be removed in Gammapy 1.y. Use the `e_ref` column instead
?
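
If the column really is scheduled for removal, the usual pattern would be a warning raised through the `warnings` module rather than a log record. A minimal sketch (assuming `GammapyDeprecationWarning` lives in `gammapy.utils.deprecation` and that the exact removal version is still to be decided):

```python
import warnings

from gammapy.utils.deprecation import GammapyDeprecationWarning  # assumed import path

warnings.warn(
    'The "energy" column is deprecated and will be removed in a future release; '
    'use the "e_ref" column instead.',
    GammapyDeprecationWarning,
    stacklevel=2,
)
```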
</issue>
<code>
[start of gammapy/estimators/points/sensitivity.py]
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import logging
3 import numpy as np
4 from astropy.table import Column, Table
5 from gammapy.maps import Map
6 from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
7 from gammapy.stats import WStatCountsStatistic
8 from ..core import Estimator
9
10 __all__ = ["SensitivityEstimator"]
11
12
13 class SensitivityEstimator(Estimator):
14 """Estimate sensitivity.
15
16 This class allows to determine for each reconstructed energy bin the flux
17 associated to the number of gamma-ray events for which the significance is
18 ``n_sigma``, and being larger than ``gamma_min`` and ``bkg_sys`` percent
19 larger than the number of background events in the ON region.
20
21
22 Parameters
23 ----------
24 spectrum : `SpectralModel`
25 Spectral model assumption. Default is Power Law with index 2.
26 n_sigma : float, optional
27 Minimum significance. Default is 5.
28 gamma_min : float, optional
29 Minimum number of gamma-rays. Default is 10.
30 bkg_syst_fraction : float, optional
31 Fraction of background counts above which the number of gamma-rays is. Default is 0.05.
32
33 Examples
34 --------
35 For a usage example see :doc:`/tutorials/analysis-1d/cta_sensitivity` tutorial.
36
37 """
38
39 tag = "SensitivityEstimator"
40
41 def __init__(
42 self,
43 spectrum=None,
44 n_sigma=5.0,
45 gamma_min=10,
46 bkg_syst_fraction=0.05,
47 ):
48
49 if spectrum is None:
50 spectrum = PowerLawSpectralModel(index=2, amplitude="1 cm-2 s-1 TeV-1")
51
52 self.spectrum = spectrum
53 self.n_sigma = n_sigma
54 self.gamma_min = gamma_min
55 self.bkg_syst_fraction = bkg_syst_fraction
56
57 def estimate_min_excess(self, dataset):
58 """Estimate minimum excess to reach the given significance.
59
60 Parameters
61 ----------
62 dataset : `SpectrumDataset`
63 Spectrum dataset.
64
65 Returns
66 -------
67 excess : `~gammapy.maps.RegionNDMap`
68 Minimal excess.
69 """
70 n_off = dataset.counts_off.data
71
72 stat = WStatCountsStatistic(
73 n_on=dataset.alpha.data * n_off, n_off=n_off, alpha=dataset.alpha.data
74 )
75 excess_counts = stat.n_sig_matching_significance(self.n_sigma)
76 is_gamma_limited = excess_counts < self.gamma_min
77 excess_counts[is_gamma_limited] = self.gamma_min
78 bkg_syst_limited = (
79 excess_counts < self.bkg_syst_fraction * dataset.background.data
80 )
81 excess_counts[bkg_syst_limited] = (
82 self.bkg_syst_fraction * dataset.background.data[bkg_syst_limited]
83 )
84 excess = Map.from_geom(geom=dataset._geom, data=excess_counts)
85 return excess
86
87 def estimate_min_e2dnde(self, excess, dataset):
88 """Estimate dnde from a given minimum excess.
89
90 Parameters
91 ----------
92 excess : `~gammapy.maps.RegionNDMap`
93 Minimal excess.
94 dataset : `~gammapy.datasets.SpectrumDataset`
95 Spectrum dataset.
96
97 Returns
98 -------
99 e2dnde : `~astropy.units.Quantity`
100 Minimal differential flux.
101 """
102 energy = dataset._geom.axes["energy"].center
103
104 dataset.models = SkyModel(spectral_model=self.spectrum)
105 npred = dataset.npred_signal()
106
107 phi_0 = excess / npred
108
109 dnde_model = self.spectrum(energy=energy)
110 dnde = phi_0.data[:, 0, 0] * dnde_model * energy**2
111 return dnde.to("erg / (cm2 s)")
112
113 def _get_criterion(self, excess, bkg):
114 is_gamma_limited = excess == self.gamma_min
115 is_bkg_syst_limited = excess == bkg * self.bkg_syst_fraction
116 criterion = np.chararray(excess.shape, itemsize=12)
117 criterion[is_gamma_limited] = "gamma"
118 criterion[is_bkg_syst_limited] = "bkg"
119 criterion[~np.logical_or(is_gamma_limited, is_bkg_syst_limited)] = (
120 "significance"
121 )
122 return criterion
123
124 def run(self, dataset):
125 """Run the sensitivity estimation.
126
127 Parameters
128 ----------
129 dataset : `SpectrumDatasetOnOff`
130 Dataset to compute sensitivity for.
131
132 Returns
133 -------
134 sensitivity : `~astropy.table.Table`
135 Sensitivity table.
136 """
137 energy = dataset._geom.axes["energy"].center
138 excess = self.estimate_min_excess(dataset)
139 e2dnde = self.estimate_min_e2dnde(excess, dataset)
140 criterion = self._get_criterion(
141 excess.data.squeeze(), dataset.background.data.squeeze()
142 )
143 logging.warning(
144 "Table column name energy will be deprecated by e_ref since v1.2"
145 )
146
147 return Table(
148 [
149 Column(
150 data=energy,
151 name="energy",
152 format="5g",
153 description="Reconstructed Energy",
154 ),
155 Column(
156 data=energy,
157 name="e_ref",
158 format="5g",
159 description="Energy center",
160 ),
161 Column(
162 data=dataset._geom.axes["energy"].edges_min,
163 name="e_min",
164 format="5g",
165 description="Energy edge low",
166 ),
167 Column(
168 data=dataset._geom.axes["energy"].edges_max,
169 name="e_max",
170 format="5g",
171 description="Energy edge high",
172 ),
173 Column(
174 data=e2dnde,
175 name="e2dnde",
176 format="5g",
177 description="Energy squared times differential flux",
178 ),
179 Column(
180 data=np.atleast_1d(excess.data.squeeze()),
181 name="excess",
182 format="5g",
183 description="Number of excess counts in the bin",
184 ),
185 Column(
186 data=np.atleast_1d(dataset.background.data.squeeze()),
187 name="background",
188 format="5g",
189 description="Number of background counts in the bin",
190 ),
191 Column(
192 data=np.atleast_1d(criterion),
193 name="criterion",
194 description="Sensitivity-limiting criterion",
195 ),
196 ]
197 )
198
[end of gammapy/estimators/points/sensitivity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gammapy/estimators/points/sensitivity.py b/gammapy/estimators/points/sensitivity.py
--- a/gammapy/estimators/points/sensitivity.py
+++ b/gammapy/estimators/points/sensitivity.py
@@ -1,5 +1,4 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
-import logging
import numpy as np
from astropy.table import Column, Table
from gammapy.maps import Map
@@ -140,18 +139,9 @@
criterion = self._get_criterion(
excess.data.squeeze(), dataset.background.data.squeeze()
)
- logging.warning(
- "Table column name energy will be deprecated by e_ref since v1.2"
- )
return Table(
[
- Column(
- data=energy,
- name="energy",
- format="5g",
- description="Reconstructed Energy",
- ),
Column(
data=energy,
name="e_ref",
| {"golden_diff": "diff --git a/gammapy/estimators/points/sensitivity.py b/gammapy/estimators/points/sensitivity.py\n--- a/gammapy/estimators/points/sensitivity.py\n+++ b/gammapy/estimators/points/sensitivity.py\n@@ -1,5 +1,4 @@\n # Licensed under a 3-clause BSD style license - see LICENSE.rst\n-import logging\n import numpy as np\n from astropy.table import Column, Table\n from gammapy.maps import Map\n@@ -140,18 +139,9 @@\n criterion = self._get_criterion(\n excess.data.squeeze(), dataset.background.data.squeeze()\n )\n- logging.warning(\n- \"Table column name energy will be deprecated by e_ref since v1.2\"\n- )\n \n return Table(\n [\n- Column(\n- data=energy,\n- name=\"energy\",\n- format=\"5g\",\n- description=\"Reconstructed Energy\",\n- ),\n Column(\n data=energy,\n name=\"e_ref\",\n", "issue": "Strange warning when running the `SensitivityEstimator`\n**Gammapy version**\r\n\r\nCurrent main:\r\n\r\n```\r\nGammapy support for parallelisation with ray is still a prototype and is not fully functional.\r\n\r\nSystem:\r\n\r\n\tpython_executable : /home/maxnoe/.local/conda/envs/gammapy-dev/bin/python3.9 \r\n\tpython_version : 3.9.16 \r\n\tmachine : x86_64 \r\n\tsystem : Linux \r\n\r\n\r\nGammapy package:\r\n\r\n\tversion : 1.2.dev201+g514451881.d20230627 \r\n\tpath : /home/maxnoe/Projects/gammapy/gammapy \r\n\r\n\r\nOther packages:\r\n\r\n\tnumpy : 1.25.0 \r\n\tscipy : 1.11.0 \r\n\tastropy : 5.3 \r\n\tregions : 0.7 \r\n\tclick : 8.1.3 \r\n\tyaml : 6.0 \r\n\tIPython : 8.14.0 \r\n\tjupyterlab : 3.5.3 \r\n\tmatplotlib : 3.7.1 \r\n\tpandas : 2.0.2 \r\n\thealpy : 1.16.2 \r\n\timinuit : 2.22.0 \r\n\tsherpa : 4.15.1 \r\n\tnaima : 0.10.0 \r\n\temcee : 3.1.4 \r\n\tcorner : 2.2.2 \r\n\tray : 2.5.1 \r\n\r\n\r\nGammapy environment variables:\r\n\r\n\tGAMMAPY_DATA : /home/maxnoe/Projects/gammapy/gammapy-datasets/dev \r\n```\r\n\r\n**Bug description**\r\n\r\nThere is a warning I don't understand and that seems to be outside of my control and also is at least worded a bit strangely when running the `SensitivityEstimator`.\r\nI guess this should also be a `GammapyDeprecationWarning` and not using the logging system.\r\n\r\n> Table column name energy will be deprecated by e_ref since v1.2\r\n\r\n\r\nWhat is the intention here? Will the `energy` column be removed? Would a better message then be:\r\n\r\n\r\n> The column \"energy\" is deprecated as of Gammapy 1.x and will be removed in Gammapy 1.y. Use the `e_ref` column instead\r\n\r\n?\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy.table import Column, Table\nfrom gammapy.maps import Map\nfrom gammapy.modeling.models import PowerLawSpectralModel, SkyModel\nfrom gammapy.stats import WStatCountsStatistic\nfrom ..core import Estimator\n\n__all__ = [\"SensitivityEstimator\"]\n\n\nclass SensitivityEstimator(Estimator):\n \"\"\"Estimate sensitivity.\n\n This class allows to determine for each reconstructed energy bin the flux\n associated to the number of gamma-ray events for which the significance is\n ``n_sigma``, and being larger than ``gamma_min`` and ``bkg_sys`` percent\n larger than the number of background events in the ON region.\n\n\n Parameters\n ----------\n spectrum : `SpectralModel`\n Spectral model assumption. Default is Power Law with index 2.\n n_sigma : float, optional\n Minimum significance. Default is 5.\n gamma_min : float, optional\n Minimum number of gamma-rays. 
Default is 10.\n bkg_syst_fraction : float, optional\n Fraction of background counts above which the number of gamma-rays is. Default is 0.05.\n\n Examples\n --------\n For a usage example see :doc:`/tutorials/analysis-1d/cta_sensitivity` tutorial.\n\n \"\"\"\n\n tag = \"SensitivityEstimator\"\n\n def __init__(\n self,\n spectrum=None,\n n_sigma=5.0,\n gamma_min=10,\n bkg_syst_fraction=0.05,\n ):\n\n if spectrum is None:\n spectrum = PowerLawSpectralModel(index=2, amplitude=\"1 cm-2 s-1 TeV-1\")\n\n self.spectrum = spectrum\n self.n_sigma = n_sigma\n self.gamma_min = gamma_min\n self.bkg_syst_fraction = bkg_syst_fraction\n\n def estimate_min_excess(self, dataset):\n \"\"\"Estimate minimum excess to reach the given significance.\n\n Parameters\n ----------\n dataset : `SpectrumDataset`\n Spectrum dataset.\n\n Returns\n -------\n excess : `~gammapy.maps.RegionNDMap`\n Minimal excess.\n \"\"\"\n n_off = dataset.counts_off.data\n\n stat = WStatCountsStatistic(\n n_on=dataset.alpha.data * n_off, n_off=n_off, alpha=dataset.alpha.data\n )\n excess_counts = stat.n_sig_matching_significance(self.n_sigma)\n is_gamma_limited = excess_counts < self.gamma_min\n excess_counts[is_gamma_limited] = self.gamma_min\n bkg_syst_limited = (\n excess_counts < self.bkg_syst_fraction * dataset.background.data\n )\n excess_counts[bkg_syst_limited] = (\n self.bkg_syst_fraction * dataset.background.data[bkg_syst_limited]\n )\n excess = Map.from_geom(geom=dataset._geom, data=excess_counts)\n return excess\n\n def estimate_min_e2dnde(self, excess, dataset):\n \"\"\"Estimate dnde from a given minimum excess.\n\n Parameters\n ----------\n excess : `~gammapy.maps.RegionNDMap`\n Minimal excess.\n dataset : `~gammapy.datasets.SpectrumDataset`\n Spectrum dataset.\n\n Returns\n -------\n e2dnde : `~astropy.units.Quantity`\n Minimal differential flux.\n \"\"\"\n energy = dataset._geom.axes[\"energy\"].center\n\n dataset.models = SkyModel(spectral_model=self.spectrum)\n npred = dataset.npred_signal()\n\n phi_0 = excess / npred\n\n dnde_model = self.spectrum(energy=energy)\n dnde = phi_0.data[:, 0, 0] * dnde_model * energy**2\n return dnde.to(\"erg / (cm2 s)\")\n\n def _get_criterion(self, excess, bkg):\n is_gamma_limited = excess == self.gamma_min\n is_bkg_syst_limited = excess == bkg * self.bkg_syst_fraction\n criterion = np.chararray(excess.shape, itemsize=12)\n criterion[is_gamma_limited] = \"gamma\"\n criterion[is_bkg_syst_limited] = \"bkg\"\n criterion[~np.logical_or(is_gamma_limited, is_bkg_syst_limited)] = (\n \"significance\"\n )\n return criterion\n\n def run(self, dataset):\n \"\"\"Run the sensitivity estimation.\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to compute sensitivity for.\n\n Returns\n -------\n sensitivity : `~astropy.table.Table`\n Sensitivity table.\n \"\"\"\n energy = dataset._geom.axes[\"energy\"].center\n excess = self.estimate_min_excess(dataset)\n e2dnde = self.estimate_min_e2dnde(excess, dataset)\n criterion = self._get_criterion(\n excess.data.squeeze(), dataset.background.data.squeeze()\n )\n logging.warning(\n \"Table column name energy will be deprecated by e_ref since v1.2\"\n )\n\n return Table(\n [\n Column(\n data=energy,\n name=\"energy\",\n format=\"5g\",\n description=\"Reconstructed Energy\",\n ),\n Column(\n data=energy,\n name=\"e_ref\",\n format=\"5g\",\n description=\"Energy center\",\n ),\n Column(\n data=dataset._geom.axes[\"energy\"].edges_min,\n name=\"e_min\",\n format=\"5g\",\n description=\"Energy edge low\",\n ),\n Column(\n 
data=dataset._geom.axes[\"energy\"].edges_max,\n name=\"e_max\",\n format=\"5g\",\n description=\"Energy edge high\",\n ),\n Column(\n data=e2dnde,\n name=\"e2dnde\",\n format=\"5g\",\n description=\"Energy squared times differential flux\",\n ),\n Column(\n data=np.atleast_1d(excess.data.squeeze()),\n name=\"excess\",\n format=\"5g\",\n description=\"Number of excess counts in the bin\",\n ),\n Column(\n data=np.atleast_1d(dataset.background.data.squeeze()),\n name=\"background\",\n format=\"5g\",\n description=\"Number of background counts in the bin\",\n ),\n Column(\n data=np.atleast_1d(criterion),\n name=\"criterion\",\n description=\"Sensitivity-limiting criterion\",\n ),\n ]\n )\n", "path": "gammapy/estimators/points/sensitivity.py"}]} | 2,975 | 230 |
gh_patches_debug_19574 | rasdani/github-patches | git_diff | pymedusa__Medusa-1413 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[APP SUBMITTED]: TypeError: 'int' object has no attribute '__getitem__'
### INFO
**Python Version**: `2.7.9 (default, Aug 13 2016, 17:56:53) [GCC 4.9.2]`
**Operating System**: `Linux-4.4.27-1-osmc-armv7l-with-debian-8.6`
**Locale**: `UTF-8`
**Branch**: [optimized2](../tree/optimized2)
**Database**: `44.3`
**Commit**: pymedusa/Medusa@ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6
**Link to Log**: https://gist.github.com/2f6af155ba86624566e4d31c88f94e40
### ERROR
<pre>
2016-10-31 12:46:51 ERROR SEARCHQUEUE-MANUAL-248835 :: [T411] :: [ff8dcfa] Unknown exception while searching T411. Error: "'int' object has no attribute '__getitem__'"
Traceback (most recent call last):
File "/home/osmc/Medusa/<a href="../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/search/core.py#L621">medusa/search/core.py</a>", line 621, in searchProviders
downCurQuality, manual_search, manual_search_type)
File "/home/osmc/Medusa/<a href="../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/generic_provider.py#L182">medusa/providers/generic_provider.py</a>", line 182, in find_search_results
items_list += self.search(search_string, ep_obj=episode)
File "/home/osmc/Medusa/<a href="../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/torrent/json/t411.py#L106">medusa/providers/torrent/json/t411.py</a>", line 106, in search
results += self.parse(jdata, mode)
File "/home/osmc/Medusa/<a href="../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/torrent/json/t411.py#L121">medusa/providers/torrent/json/t411.py</a>", line 121, in parse
torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)
TypeError: 'int' object has no attribute '__getitem__'
</pre>
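
The sort at `t411.py:121` assumes every element of `unsorted_torrent_rows` is a dict carrying an `'added'` key, but here at least one element (or the whole field) came back as an int, so `itemgetter('added')` fails before the existing `isinstance(torrent_rows, list)` check is reached. A possible defensive guard, shown only as a sketch and not necessarily the fix the project will adopt:

```python
unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data

# Bail out early unless the provider actually returned a list of rows.
if not isinstance(unsorted_torrent_rows, list):
    logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
    return items

# Ignore anything that is not a dict before sorting on the 'added' field.
torrent_rows = sorted(
    (row for row in unsorted_torrent_rows if isinstance(row, dict)),
    key=itemgetter('added'),
    reverse=True,
)
```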
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
</issue>
<code>
[start of medusa/providers/torrent/json/t411.py]
1 # coding=utf-8
2 # Author: djoole <[email protected]>
3 #
4 # This file is part of Medusa.
5 #
6 # Medusa is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Medusa is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Medusa. If not, see <http://www.gnu.org/licenses/>.
18 """Provider code for T411."""
19 from __future__ import unicode_literals
20
21 import time
22 import traceback
23
24 from operator import itemgetter
25 from requests.auth import AuthBase
26 from requests.compat import urljoin
27 from ..torrent_provider import TorrentProvider
28 from .... import logger, tv_cache
29 from ....common import USER_AGENT
30 from ....helper.common import convert_size, try_int
31
32
33 class T411Provider(TorrentProvider): # pylint: disable=too-many-instance-attributes
34 """T411 Torrent provider."""
35
36 def __init__(self):
37 """Provider Init."""
38 TorrentProvider.__init__(self, "T411")
39
40 # Credentials
41 self.username = None
42 self.password = None
43 self.token = None
44 self.tokenLastUpdate = None
45
46 # URLs
47 self.url = 'https://api.t411.ch'
48 self.urls = {
49 'base_url': 'http://www.t411.ch/',
50 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'),
51 'rss': urljoin(self.url, 'torrents/top/today'),
52 'login_page': urljoin(self.url, 'auth'),
53 'download': urljoin(self.url, 'torrents/download/%s'),
54 }
55
56 # Proper Strings
57
58 # Miscellaneous Options
59 self.headers.update({'User-Agent': USER_AGENT})
60 self.subcategories = [433, 637, 455, 639]
61 self.confirmed = False
62
63 # Torrent Stats
64 self.minseed = 0
65 self.minleech = 0
66
67 # Cache
68 self.cache = tv_cache.TVCache(self, min_time=10) # Only poll T411 every 10 minutes max
69
70 def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
71 """Search a provider and parse the results.
72
73 :param search_strings: A dict with mode (key) and the search value (value)
74 :param age: Not used
75 :param ep_obj: Not used
76 :returns: A list of search results (structure)
77 """
78 results = []
79 if not self.login():
80 return results
81
82 for mode in search_strings:
83 logger.log('Search mode: {0}'.format(mode), logger.DEBUG)
84
85 for search_string in search_strings[mode]:
86 if mode != 'RSS':
87 logger.log('Search string: {search}'.format
88 (search=search_string), logger.DEBUG)
89 if self.confirmed:
90 logger.log('Searching only confirmed torrents', logger.DEBUG)
91
92 search_urls = ([self.urls['search'] % (search_string, u)
93 for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']
94 for search_url in search_urls:
95 response = self.get_url(search_url, returns='response')
96
97 if not response or not response.content:
98 logger.log('No data returned from provider', logger.DEBUG)
99 continue
100
101 try:
102 jdata = response.json()
103 except ValueError: # also catches JSONDecodeError if simplejson is installed
104 logger.log('No data returned from provider', logger.DEBUG)
105 continue
106
107 results += self.parse(jdata, mode)
108
109 return results
110
111 def parse(self, data, mode):
112 """Parse search results for items.
113
114 :param data: The raw response from a search
115 :param mode: The current mode used to search, e.g. RSS
116
117 :return: A list of items found
118 """
119 items = []
120
121 unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data
122 torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)
123
124 if not torrent_rows or not isinstance(torrent_rows, list):
125 logger.log('Data returned from provider does not contain any {0}torrents'.format(
126 'confirmed ' if self.confirmed else ''), logger.DEBUG)
127 return items
128
129 for row in torrent_rows:
130 if not isinstance(row, dict):
131 logger.log('Invalid data returned from provider', logger.WARNING)
132 continue
133
134 if mode == 'RSS' and 'category' in row and try_int(row['category'], 0) not in self.subcategories:
135 continue
136
137 try:
138 title = row['name']
139 torrent_id = row['id']
140 download_url = (self.urls['download'] % torrent_id)
141 if not all([title, download_url]):
142 continue
143
144 seeders = try_int(row['seeders'])
145 leechers = try_int(row['leechers'])
146 verified = bool(row['isVerified'])
147
148 # Filter unseeded torrent
149 if seeders < min(self.minseed, 1):
150 if mode != 'RSS':
151 logger.log("Discarding torrent because it doesn't meet the "
152 "minimum seeders: {0}. Seeders: {1}".format
153 (title, seeders), logger.DEBUG)
154 continue
155
156 if self.confirmed and not verified and mode != 'RSS':
157 logger.log("Found result {0} but that doesn't seem like a verified"
158 " result so I'm ignoring it".format(title), logger.DEBUG)
159 continue
160
161 torrent_size = row['size']
162 size = convert_size(torrent_size) or -1
163
164 item = {
165 'title': title,
166 'link': download_url,
167 'size': size,
168 'seeders': seeders,
169 'leechers': leechers,
170 'pubdate': None,
171 'torrent_hash': None,
172 }
173 if mode != 'RSS':
174 logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
175 (title, seeders, leechers), logger.DEBUG)
176
177 items.append(item)
178 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
179 logger.log('Failed parsing provider. Traceback: {0!r}'.format
180 (traceback.format_exc()), logger.ERROR)
181
182 return items
183
184 def login(self):
185 """Log into provider."""
186 if self.token is not None:
187 if time.time() < (self.tokenLastUpdate + 30 * 60):
188 return True
189
190 login_params = {
191 'username': self.username,
192 'password': self.password,
193 }
194
195 response = self.get_url(self.urls['login_page'], post_data=login_params, returns='json')
196 if not response:
197 logger.log('Unable to connect to provider', logger.WARNING)
198 return False
199
200 if response and 'token' in response:
201 self.token = response['token']
202 self.tokenLastUpdate = time.time()
203 # self.uid = response['uid'].encode('ascii', 'ignore')
204 self.session.auth = T411Auth(self.token)
205 return True
206 else:
207 logger.log('Token not found in authentication response', logger.WARNING)
208 return False
209
210
211 class T411Auth(AuthBase): # pylint: disable=too-few-public-methods
212 """Attach HTTP Authentication to the given Request object."""
213
214 def __init__(self, token):
215 """Init object."""
216 self.token = token
217
218 def __call__(self, r):
219 """Add token to request header."""
220 r.headers['Authorization'] = self.token
221 return r
222
223
224 provider = T411Provider()
225
[end of medusa/providers/torrent/json/t411.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/providers/torrent/json/t411.py b/medusa/providers/torrent/json/t411.py
--- a/medusa/providers/torrent/json/t411.py
+++ b/medusa/providers/torrent/json/t411.py
@@ -119,13 +119,14 @@
items = []
unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data
- torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)
- if not torrent_rows or not isinstance(torrent_rows, list):
+ if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict):
logger.log('Data returned from provider does not contain any {0}torrents'.format(
'confirmed ' if self.confirmed else ''), logger.DEBUG)
return items
+ torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)
+
for row in torrent_rows:
if not isinstance(row, dict):
logger.log('Invalid data returned from provider', logger.WARNING)
| {"golden_diff": "diff --git a/medusa/providers/torrent/json/t411.py b/medusa/providers/torrent/json/t411.py\n--- a/medusa/providers/torrent/json/t411.py\n+++ b/medusa/providers/torrent/json/t411.py\n@@ -119,13 +119,14 @@\n items = []\n \n unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data\n- torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\n \n- if not torrent_rows or not isinstance(torrent_rows, list):\n+ if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict):\n logger.log('Data returned from provider does not contain any {0}torrents'.format(\n 'confirmed ' if self.confirmed else ''), logger.DEBUG)\n return items\n \n+ torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\n+\n for row in torrent_rows:\n if not isinstance(row, dict):\n logger.log('Invalid data returned from provider', logger.WARNING)\n", "issue": "[APP SUBMITTED]: TypeEError: 'int' object has no attribute '__getitem__'\n### INFO\n**Python Version**: `2.7.9 (default, Aug 13 2016, 17:56:53) [GCC 4.9.2]`\n**Operating System**: `Linux-4.4.27-1-osmc-armv7l-with-debian-8.6`\n**Locale**: `UTF-8`\n**Branch**: [optimized2](../tree/optimized2)\n**Database**: `44.3`\n**Commit**: pymedusa/Medusa@ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6\n**Link to Log**: https://gist.github.com/2f6af155ba86624566e4d31c88f94e40\n### ERROR\n<pre>\n2016-10-31 12:46:51 ERROR SEARCHQUEUE-MANUAL-248835 :: [T411] :: [ff8dcfa] Unknown exception while searching T411. Error: \"'int' object has no attribute '__getitem__'\"\nTraceback (most recent call last):\n File \"/home/osmc/Medusa/<a href=\"../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/search/core.py#L621\">medusa/search/core.py</a>\", line 621, in searchProviders\n downCurQuality, manual_search, manual_search_type)\n File \"/home/osmc/Medusa/<a href=\"../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/generic_provider.py#L182\">medusa/providers/generic_provider.py</a>\", line 182, in find_search_results\n items_list += self.search(search_string, ep_obj=episode)\n File \"/home/osmc/Medusa/<a href=\"../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/torrent/json/t411.py#L106\">medusa/providers/torrent/json/t411.py</a>\", line 106, in search\n results += self.parse(jdata, mode)\n File \"/home/osmc/Medusa/<a href=\"../blob/ff8dcfa6034366fa6c9d0b526b67e3cdb02fafe6/medusa/providers/torrent/json/t411.py#L121\">medusa/providers/torrent/json/t411.py</a>\", line 121, in parse\n torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\nTypeError: 'int' object has no attribute '__getitem__'\n</pre>\n\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n", "before_files": [{"content": "# coding=utf-8\n# Author: djoole <[email protected]>\n#\n# This file is part of Medusa.\n#\n# Medusa is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Medusa is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Medusa. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Provider code for T411.\"\"\"\nfrom __future__ import unicode_literals\n\nimport time\nimport traceback\n\nfrom operator import itemgetter\nfrom requests.auth import AuthBase\nfrom requests.compat import urljoin\nfrom ..torrent_provider import TorrentProvider\nfrom .... import logger, tv_cache\nfrom ....common import USER_AGENT\nfrom ....helper.common import convert_size, try_int\n\n\nclass T411Provider(TorrentProvider): # pylint: disable=too-many-instance-attributes\n \"\"\"T411 Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Provider Init.\"\"\"\n TorrentProvider.__init__(self, \"T411\")\n\n # Credentials\n self.username = None\n self.password = None\n self.token = None\n self.tokenLastUpdate = None\n\n # URLs\n self.url = 'https://api.t411.ch'\n self.urls = {\n 'base_url': 'http://www.t411.ch/',\n 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'),\n 'rss': urljoin(self.url, 'torrents/top/today'),\n 'login_page': urljoin(self.url, 'auth'),\n 'download': urljoin(self.url, 'torrents/download/%s'),\n }\n\n # Proper Strings\n\n # Miscellaneous Options\n self.headers.update({'User-Agent': USER_AGENT})\n self.subcategories = [433, 637, 455, 639]\n self.confirmed = False\n\n # Torrent Stats\n self.minseed = 0\n self.minleech = 0\n\n # Cache\n self.cache = tv_cache.TVCache(self, min_time=10) # Only poll T411 every 10 minutes max\n\n def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches\n \"\"\"Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n logger.log('Search mode: {0}'.format(mode), logger.DEBUG)\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n logger.log('Search string: {search}'.format\n (search=search_string), logger.DEBUG)\n if self.confirmed:\n logger.log('Searching only confirmed torrents', logger.DEBUG)\n\n search_urls = ([self.urls['search'] % (search_string, u)\n for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']\n for search_url in search_urls:\n response = self.get_url(search_url, returns='response')\n\n if not response or not response.content:\n logger.log('No data returned from provider', logger.DEBUG)\n continue\n\n try:\n jdata = response.json()\n except ValueError: # also catches JSONDecodeError if simplejson is installed\n logger.log('No data returned from provider', logger.DEBUG)\n continue\n\n results += self.parse(jdata, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data\n torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\n\n if not torrent_rows or not isinstance(torrent_rows, list):\n logger.log('Data returned from provider does not contain any {0}torrents'.format(\n 'confirmed ' if self.confirmed else ''), logger.DEBUG)\n return items\n\n for row in torrent_rows:\n if not isinstance(row, dict):\n logger.log('Invalid data returned from provider', logger.WARNING)\n continue\n\n if mode == 'RSS' and 'category' in row and try_int(row['category'], 0) not in self.subcategories:\n continue\n\n try:\n title = row['name']\n torrent_id = row['id']\n download_url = (self.urls['download'] % torrent_id)\n if not all([title, download_url]):\n continue\n\n seeders = try_int(row['seeders'])\n leechers = try_int(row['leechers'])\n verified = bool(row['isVerified'])\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n logger.log(\"Discarding torrent because it doesn't meet the \"\n \"minimum seeders: {0}. Seeders: {1}\".format\n (title, seeders), logger.DEBUG)\n continue\n\n if self.confirmed and not verified and mode != 'RSS':\n logger.log(\"Found result {0} but that doesn't seem like a verified\"\n \" result so I'm ignoring it\".format(title), logger.DEBUG)\n continue\n\n torrent_size = row['size']\n size = convert_size(torrent_size) or -1\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': None,\n 'torrent_hash': None,\n }\n if mode != 'RSS':\n logger.log('Found result: {0} with {1} seeders and {2} leechers'.format\n (title, seeders, leechers), logger.DEBUG)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n logger.log('Failed parsing provider. Traceback: {0!r}'.format\n (traceback.format_exc()), logger.ERROR)\n\n return items\n\n def login(self):\n \"\"\"Log into provider.\"\"\"\n if self.token is not None:\n if time.time() < (self.tokenLastUpdate + 30 * 60):\n return True\n\n login_params = {\n 'username': self.username,\n 'password': self.password,\n }\n\n response = self.get_url(self.urls['login_page'], post_data=login_params, returns='json')\n if not response:\n logger.log('Unable to connect to provider', logger.WARNING)\n return False\n\n if response and 'token' in response:\n self.token = response['token']\n self.tokenLastUpdate = time.time()\n # self.uid = response['uid'].encode('ascii', 'ignore')\n self.session.auth = T411Auth(self.token)\n return True\n else:\n logger.log('Token not found in authentication response', logger.WARNING)\n return False\n\n\nclass T411Auth(AuthBase): # pylint: disable=too-few-public-methods\n \"\"\"Attach HTTP Authentication to the given Request object.\"\"\"\n\n def __init__(self, token):\n \"\"\"Init object.\"\"\"\n self.token = token\n\n def __call__(self, r):\n \"\"\"Add token to request header.\"\"\"\n r.headers['Authorization'] = self.token\n return r\n\n\nprovider = T411Provider()\n", "path": "medusa/providers/torrent/json/t411.py"}]} | 3,637 | 247 |
gh_patches_debug_20933 | rasdani/github-patches | git_diff | fossasia__open-event-server-5516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set up logging correctly
Currently, logging is hardcoded in the app and never really used. Logging should be used on almost every action, with the proper level, such as:
- trace
- debug
- info
- warn
- error
Right now, the logger is hardcoded to show ERROR only. This needs to be configurable through the command line. 
Also, I think the default should be `INFO` or even `DEBUG` for the app itself and `WARN` for third-party libraries.
</issue>
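The behaviour the issue asks for — an app-level default of `INFO`/`DEBUG`, `WARN` for third-party libraries, and a command-line switch — can be sketched roughly as follows (illustrative only; the flag name and the logger names are assumptions, not the project's actual implementation):

```python
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--log-level', default='INFO',
                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'])
args = parser.parse_args()

# The application follows the requested level; noisy third-party
# libraries are pinned to WARNING regardless.
logging.basicConfig(level=getattr(logging, args.log_level))
for noisy in ('requests', 'urllib3', 'apscheduler'):
    logging.getLogger(noisy).setLevel(logging.WARNING)
```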
<code>
[start of app/__init__.py]
1 from celery.signals import after_task_publish
2 import logging
3 import os.path
4 from envparse import env
5 import sys
6 from flask import Flask, json, make_response
7 from app.settings import get_settings, get_setts
8 from flask_migrate import Migrate, MigrateCommand
9 from flask_script import Manager
10 from flask_login import current_user
11 from flask_jwt import JWT
12 from datetime import timedelta
13 from flask_cors import CORS
14 from flask_rest_jsonapi.errors import jsonapi_errors
15 from flask_rest_jsonapi.exceptions import JsonApiException
16 from healthcheck import HealthCheck, EnvironmentDump
17 from apscheduler.schedulers.background import BackgroundScheduler
18 from elasticsearch_dsl.connections import connections
19 from pytz import utc
20
21 import sqlalchemy as sa
22
23 import stripe
24 from app.settings import get_settings
25 from app.models import db
26 from app.api.helpers.jwt import jwt_authenticate, jwt_identity
27 from app.api.helpers.cache import cache
28 from werkzeug.contrib.profiler import ProfilerMiddleware
29 from app.views import BlueprintsManager
30 from app.api.helpers.auth import AuthManager
31 from app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \
32 send_event_fee_notification_followup
33 from app.models.event import Event
34 from app.models.role_invite import RoleInvite
35 from app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations
36 from app.views.sentry import sentry
37 from app.views.elastic_search import client
38 from app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch
39 from app.views.redis_store import redis_store
40 from app.views.celery_ import celery
41 from app.templates.flask_ext.jinja.filters import init_filters
42
43
44 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
45
46 static_dir = os.path.dirname(os.path.dirname(__file__)) + "/static"
47 template_dir = os.path.dirname(__file__) + "/templates"
48 app = Flask(__name__, static_folder=static_dir, template_folder=template_dir)
49
50 env.read_envfile()
51
52
53 class ReverseProxied(object):
54 """
55 ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel
56 """
57
58 def __init__(self, app):
59 self.app = app
60
61 def __call__(self, environ, start_response):
62 scheme = environ.get('HTTP_X_FORWARDED_PROTO')
63 if scheme:
64 environ['wsgi.url_scheme'] = scheme
65 if os.getenv('FORCE_SSL', 'no') == 'yes':
66 environ['wsgi.url_scheme'] = 'https'
67 return self.app(environ, start_response)
68
69
70 app.wsgi_app = ReverseProxied(app.wsgi_app)
71
72 app_created = False
73
74
75 def create_app():
76 global app_created
77 if not app_created:
78 BlueprintsManager.register(app)
79 Migrate(app, db)
80
81 app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))
82 db.init_app(app)
83 _manager = Manager(app)
84 _manager.add_command('db', MigrateCommand)
85
86 if app.config['CACHING']:
87 cache.init_app(app, config={'CACHE_TYPE': 'simple'})
88 else:
89 cache.init_app(app, config={'CACHE_TYPE': 'null'})
90
91 stripe.api_key = 'SomeStripeKey'
92 app.secret_key = 'super secret key'
93 app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
94 app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
95
96 app.logger.addHandler(logging.StreamHandler(sys.stdout))
97 app.logger.setLevel(logging.ERROR)
98
99 # set up jwt
100 app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
101 app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)
102 app.config['JWT_AUTH_URL_RULE'] = '/auth/session'
103 _jwt = JWT(app, jwt_authenticate, jwt_identity)
104
105 # setup celery
106 app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']
107 app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
108
109 CORS(app, resources={r"/*": {"origins": "*"}})
110 AuthManager.init_login(app)
111
112 if app.config['TESTING'] and app.config['PROFILE']:
113 # Profiling
114 app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
115
116 # development api
117 with app.app_context():
118 from app.api.admin_statistics_api.events import event_statistics
119 from app.api.auth import auth_routes
120 from app.api.attendees import attendee_misc_routes
121 from app.api.bootstrap import api_v1
122 from app.api.celery_tasks import celery_routes
123 from app.api.event_copy import event_copy
124 from app.api.exports import export_routes
125 from app.api.imports import import_routes
126 from app.api.uploads import upload_routes
127 from app.api.users import user_misc_routes
128 from app.api.orders import order_misc_routes
129 from app.api.role_invites import role_invites_misc_routes
130
131 app.register_blueprint(api_v1)
132 app.register_blueprint(event_copy)
133 app.register_blueprint(upload_routes)
134 app.register_blueprint(export_routes)
135 app.register_blueprint(import_routes)
136 app.register_blueprint(celery_routes)
137 app.register_blueprint(auth_routes)
138 app.register_blueprint(event_statistics)
139 app.register_blueprint(user_misc_routes)
140 app.register_blueprint(attendee_misc_routes)
141 app.register_blueprint(order_misc_routes)
142 app.register_blueprint(role_invites_misc_routes)
143
144 sa.orm.configure_mappers()
145
146 if app.config['SERVE_STATIC']:
147 app.add_url_rule('/static/<path:filename>',
148 endpoint='static',
149 view_func=app.send_static_file)
150
151 # sentry
152 if not app_created and 'SENTRY_DSN' in app.config:
153 sentry.init_app(app, dsn=app.config['SENTRY_DSN'])
154
155 # redis
156 redis_store.init_app(app)
157
158 # elasticsearch
159 if app.config['ENABLE_ELASTICSEARCH']:
160 client.init_app(app)
161 connections.add_connection('default', client.elasticsearch)
162 with app.app_context():
163 try:
164 cron_rebuild_events_elasticsearch.delay()
165 except Exception:
166 pass
167
168 app_created = True
169 return app, _manager, db, _jwt
170
171
172 current_app, manager, database, jwt = create_app()
173 init_filters(app)
174
175
176 # http://stackoverflow.com/questions/26724623/
177 @app.before_request
178 def track_user():
179 if current_user.is_authenticated:
180 current_user.update_lat()
181
182
183 def make_celery(app=None):
184 app = app or create_app()[0]
185 celery.conf.update(app.config)
186 task_base = celery.Task
187
188 class ContextTask(task_base):
189 abstract = True
190
191 def __call__(self, *args, **kwargs):
192 if current_app.config['TESTING']:
193 with app.test_request_context():
194 return task_base.__call__(self, *args, **kwargs)
195 with app.app_context():
196 return task_base.__call__(self, *args, **kwargs)
197
198 celery.Task = ContextTask
199 return celery
200
201
202 # Health-check
203 health = HealthCheck(current_app, "/health-check")
204 envdump = EnvironmentDump(current_app, "/environment", include_config=False)
205 health.add_check(health_check_celery)
206 health.add_check(health_check_db)
207 with current_app.app_context():
208 current_app.config['MIGRATION_STATUS'] = check_migrations()
209 health.add_check(health_check_migrations)
210
211
212 # http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists
213 @after_task_publish.connect
214 def update_sent_state(sender=None, body=None, **kwargs):
215 # the task may not exist if sent using `send_task` which
216 # sends tasks by name, so fall back to the default result backend
217 # if that is the case.
218 task = celery.tasks.get(sender)
219 backend = task.backend if task else celery.backend
220 backend.store_result(body['id'], None, 'WAITING')
221
222
223 # register celery tasks. removing them will cause the tasks to not function. so don't remove them
224 # it is important to register them after celery is defined to resolve circular imports
225
226 from .api.helpers import tasks
227
228 # import helpers.tasks
229
230
231 scheduler = BackgroundScheduler(timezone=utc)
232 # scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)
233 # scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)
234 if app.config['ENABLE_ELASTICSEARCH']:
235 scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)
236 scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)
237
238 scheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)
239 scheduler.add_job(send_event_fee_notification, 'cron', day=1)
240 scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)
241 scheduler.start()
242
243
244 @app.errorhandler(500)
245 def internal_server_error(error):
246 if current_app.config['PROPOGATE_ERROR'] is True:
247 exc = JsonApiException({'pointer': ''}, str(error))
248 else:
249 exc = JsonApiException({'pointer': ''}, 'Unknown error')
250 return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,
251 {'Content-Type': 'application/vnd.api+json'})
252
253
254 if __name__ == '__main__':
255 current_app.run()
256
[end of app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/__init__.py b/app/__init__.py
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -1,4 +1,5 @@
from celery.signals import after_task_publish
+from logging.config import dictConfig
import logging
import os.path
from envparse import env
@@ -93,8 +94,28 @@
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'
- app.logger.addHandler(logging.StreamHandler(sys.stdout))
- app.logger.setLevel(logging.ERROR)
+ app.logger = logging.getLogger(__name__)
+
+ dictConfig({
+ 'version': 1,
+ 'handlers': {
+ 'console': {
+ 'level': logging.WARN,
+ 'class': 'logging.StreamHandler',
+ 'stream': 'ext://sys.stdout'
+ }
+ },
+ 'loggers': {
+ '': {
+ 'handlers': ['console'],
+ 'level': logging.WARN
+ },
+ 'default': {
+ 'handlers': ['console'],
+ 'level': logging.INFO
+ }
+ }
+ })
# set up jwt
app.config['JWT_AUTH_USERNAME_KEY'] = 'email'
| {"golden_diff": "diff --git a/app/__init__.py b/app/__init__.py\n--- a/app/__init__.py\n+++ b/app/__init__.py\n@@ -1,4 +1,5 @@\n from celery.signals import after_task_publish\n+from logging.config import dictConfig\n import logging\n import os.path\n from envparse import env\n@@ -93,8 +94,28 @@\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n \n- app.logger.addHandler(logging.StreamHandler(sys.stdout))\n- app.logger.setLevel(logging.ERROR)\n+ app.logger = logging.getLogger(__name__)\n+\n+ dictConfig({\n+ 'version': 1,\n+ 'handlers': {\n+ 'console': {\n+ 'level': logging.WARN,\n+ 'class': 'logging.StreamHandler',\n+ 'stream': 'ext://sys.stdout'\n+ }\n+ },\n+ 'loggers': {\n+ '': {\n+ 'handlers': ['console'],\n+ 'level': logging.WARN\n+ },\n+ 'default': {\n+ 'handlers': ['console'],\n+ 'level': logging.INFO\n+ }\n+ }\n+ })\n \n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 'email'\n", "issue": "Set up logging correctly\nCurrently, logging is hardcoded in the app and never really used. Logging should be used almost on every action with proper label as:\r\n\r\n- trace\r\n- debug\r\n- info\r\n- warn\r\n- error\r\n\r\nRight now, logger is hardcoded to show ERROR only. This needs to be configurable through the command line. \r\n\r\nAlso, I think default should be `INFO` or even `DEBUG` for the app itself and warn for other 3rd party libraries. \n", "before_files": [{"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\nimport sys\nfrom flask import Flask, json, make_response\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck, EnvironmentDump\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom app.api.helpers.cache import cache\nfrom werkzeug.contrib.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.sentry import sentry\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, static_folder=static_dir, template_folder=template_dir)\n\nenv.read_envfile()\n\n\nclass 
ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry.init_app(app, dsn=app.config['SENTRY_DSN'])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except 
Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n task_base = celery.Task\n\n class ContextTask(task_base):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n if current_app.config['TESTING']:\n with app.test_request_context():\n return task_base.__call__(self, *args, **kwargs)\n with app.app_context():\n return task_base.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nenvdump = EnvironmentDump(current_app, \"/environment\", include_config=False)\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, body=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(body['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n", "path": "app/__init__.py"}]} | 3,349 | 288 |
gh_patches_debug_2412 | rasdani/github-patches | git_diff | quantumlib__Cirq-3689 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add to heatmap visualization tests
In the `test_colorbar` test, there is a comment about testing that the position, size, and pad arguments are respected.
</issue>
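A sketch of what such a test might look like (illustrative only — the exact assertions and the reliance on `fig.axes` ordering are assumptions, not Cirq's actual test suite):

```python
import matplotlib.pyplot as plt

from cirq.vis.heatmap import Heatmap


def test_colorbar_position_is_respected():
    value_map = {(0, 0): 0.1, (0, 1): 0.2, (1, 0): 0.3}
    fig, ax = plt.subplots()
    Heatmap(value_map).set_colorbar(position='right', size='50%', pad='10%').plot(ax)
    fig.canvas.draw()

    # plot() should have appended exactly one extra axes for the colorbar,
    # and with position='right' it must sit to the right of the heatmap.
    assert len(fig.axes) == 2
    main, cbar = fig.axes
    assert cbar.get_position().x0 >= main.get_position().x1
    # Size and pad could be checked similarly by comparing the widths of,
    # and the gap between, the two axes' bounding boxes.
    plt.close(fig)
```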
<code>
[start of cirq/vis/heatmap.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Heatmap class.
15
16 See examples/bristlecone_heatmap_example.py for an example usage in
17 an interactive session.
18 """
19
20 from typing import Any, Dict, List, Mapping, Optional, SupportsFloat, Tuple, Union
21
22 import numpy as np
23 import matplotlib as mpl
24 import matplotlib.pyplot as plt
25 import pandas as pd
26 from matplotlib import collections as mpl_collections
27 from mpl_toolkits import axes_grid1
28
29 from cirq.devices import grid_qubit
30
31 QubitCoordinate = Union[Tuple[int, int], grid_qubit.GridQubit]
32
33 # The value map is qubit coordinate -> a type that supports float conversion.
34 ValueMap = Union[Dict[grid_qubit.GridQubit, SupportsFloat], Dict[Tuple[int, int], SupportsFloat]]
35
36
37 def _get_qubit_row_col(qubit: QubitCoordinate) -> Tuple[int, int]:
38 if isinstance(qubit, grid_qubit.GridQubit):
39 return qubit.row, qubit.col
40 elif isinstance(qubit, tuple):
41 return qubit[0], qubit[1]
42
43
44 def relative_luminance(color: np.ndarray) -> float:
45 """Returns the relative luminance according to W3C specification.
46
47 Spec: https://www.w3.org/TR/WCAG21/#dfn-relative-luminance.
48
49 Args:
50 color: a numpy array with the first 3 elements red, green, and blue
51 with values in [0, 1].
52 Returns:
53 relative luminance of color in [0, 1].
54 """
55 rgb = color[:3]
56 rgb = np.where(rgb <= 0.03928, rgb / 12.92, ((rgb + 0.055) / 1.055) ** 2.4)
57 return rgb.dot([0.2126, 0.7152, 0.0722])
58
59
60 class Heatmap:
61 """Distribution of a value in 2D qubit lattice as a color map."""
62
63 def __init__(self, value_map: ValueMap) -> None:
64 self.set_value_map(value_map)
65 self.annot_map = { # Default annotation.
66 _get_qubit_row_col(qubit): format(float(value), '.2g')
67 for qubit, value in value_map.items()
68 }
69 self.annot_kwargs: Dict[str, Any] = {}
70 self.unset_url_map()
71 self.set_colorbar()
72 self.set_colormap()
73
74 def set_value_map(self, value_map: ValueMap) -> 'Heatmap':
75 """Sets the values for each qubit.
76
77 Args:
78 value_map: the values for determining color for each cell.
79 """
80 # Fail fast if float() fails.
81 # Keep the original value object for annotation.
82 self.value_map = {qubit: (float(value), value) for qubit, value in value_map.items()}
83 return self
84
85 def set_annotation_map(
86 self, annot_map: Mapping[QubitCoordinate, str], **text_options: str
87 ) -> 'Heatmap':
88 """Sets the annotation text for each qubit.
89
90 Note that set_annotation_map() and set_annotation_format()
91 both sets the annotation map to be used. Whichever is called later wins.
92
93 Args:
94 annot_map: the texts to be drawn on each qubit cell.
95 text_options: keyword arguments passed to matplotlib.text.Text()
96 when drawing the annotation texts.
97 """
98 self.annot_map = {_get_qubit_row_col(qubit): value for qubit, value in annot_map.items()}
99 self.annot_kwargs = text_options
100 return self
101
102 def set_annotation_format(self, annot_format: str, **text_options: str) -> 'Heatmap':
103 """Sets a format string to format values for each qubit.
104
105 Args:
106 annot_format: the format string for formatting values.
107 text_options: keyword arguments to matplotlib.text.Text().
108 """
109 self.annot_map = {
110 _get_qubit_row_col(qubit): format(value[1], annot_format)
111 for qubit, value in self.value_map.items()
112 }
113 self.annot_kwargs = text_options
114 return self
115
116 def unset_annotation(self) -> 'Heatmap':
117 """Disables annotation. No texts are shown in cells."""
118 self.annot_map = {}
119 return self
120
121 def set_url_map(self, url_map: Mapping[QubitCoordinate, str]) -> 'Heatmap':
122 """Sets the URLs for each cell."""
123 self.url_map = {_get_qubit_row_col(qubit): value for qubit, value in url_map.items()}
124 return self
125
126 def unset_url_map(self) -> 'Heatmap':
127 """Disables URL. No URLs are associated with cells."""
128 self.url_map = {}
129 return self
130
131 def set_colorbar(
132 self, position: str = 'right', size: str = '5%', pad: str = '2%', **colorbar_options: Any
133 ) -> 'Heatmap':
134 """Sets location and style of colorbar.
135
136 Args:
137 position: colorbar position, one of 'left'|'right'|'top'|'bottom'.
138 size: a string ending in '%' to specify the width of the colorbar.
139 Nominally, '100%' means the same width as the heatmap.
140 pad: a string ending in '%' to specify the space between the
141 colorbar and the heatmap.
142 colorbar_options: keyword arguments passed to
143 matplotlib.Figure.colorbar().
144 """
145 self.plot_colorbar = True
146 self.colorbar_location_options = {'position': position, 'size': size, 'pad': pad}
147 self.colorbar_options = colorbar_options
148 return self
149
150 def unset_colorbar(self) -> 'Heatmap':
151 """Disables colorbar. No colorbar is drawn."""
152 self.plot_colorbar = False
153 return self
154
155 def set_colormap(
156 self,
157 colormap: Union[str, mpl.colors.Colormap] = 'viridis',
158 vmin: Optional[float] = None,
159 vmax: Optional[float] = None,
160 ) -> 'Heatmap':
161 """Sets the colormap.
162
163 Args:
164 colormap: either a colormap name or a Colormap instance.
165 vmin: the minimum value to map to the minimum color. Default is
166 the minimum value in value_map.
167 vmax: the maximum value to map to the maximum color. Default is
168 the maximum value in value_map.
169 """
170 self.colormap = colormap
171 self.vmin = vmin
172 self.vmax = vmax
173 return self
174
175 def plot(
176 self, ax: Optional[plt.Axes] = None, **pcolor_options: Any
177 ) -> Tuple[plt.Axes, mpl_collections.Collection, pd.DataFrame]:
178 """Plots the heatmap on the given Axes.
179
180 Args:
181 ax: the Axes to plot on. If not given, a new figure is created,
182 plotted on, and shown.
183 pcolor_options: keyword arguments passed to ax.pcolor().
184
185 Returns:
186 A 3-tuple ``(ax, mesh, value_table)``. ``ax`` is the `plt.Axes` that
187 is plotted on. ``mesh`` is the collection of paths drawn and filled.
188 ``value_table`` is the 2-D pandas DataFrame of values constructed
189 from the value_map.
190 """
191 show_plot = not ax
192 if not ax:
193 fig, ax = plt.subplots(figsize=(8, 8))
194 # Find the boundary and size of the heatmap.
195 coordinate_list = [_get_qubit_row_col(qubit) for qubit in self.value_map.keys()]
196 rows = [row for row, _ in coordinate_list]
197 cols = [col for _, col in coordinate_list]
198 min_row, max_row = min(rows), max(rows)
199 min_col, max_col = min(cols), max(cols)
200 height, width = max_row - min_row + 1, max_col - min_col + 1
201 # Construct the (height x width) table of values. Cells with no values
202 # are filled with np.nan.
203 value_table = pd.DataFrame(
204 np.nan, index=range(min_row, max_row + 1), columns=range(min_col, max_col + 1)
205 )
206 for qubit, (float_value, _) in self.value_map.items():
207 row, col = _get_qubit_row_col(qubit)
208 value_table[col][row] = float_value
209 # Construct the (height + 1) x (width + 1) cell boundary tables.
210 x_table = np.array([np.arange(min_col - 0.5, max_col + 1.5)] * (height + 1))
211 y_table = np.array([np.arange(min_row - 0.5, max_row + 1.5)] * (width + 1)).transpose()
212
213 # Construct the URL array as an ordered list of URLs for non-nan cells.
214 url_array: List[str] = []
215 if self.url_map:
216 url_array = [self.url_map.get((row, col), '') for row, col in value_table.stack().index]
217
218 # Plot the heatmap.
219 mesh = ax.pcolor(
220 x_table,
221 y_table,
222 value_table,
223 vmin=self.vmin,
224 vmax=self.vmax,
225 cmap=self.colormap,
226 urls=url_array,
227 **pcolor_options,
228 )
229 mesh.update_scalarmappable()
230 ax.set(xlabel='column', ylabel='row')
231 ax.xaxis.set_ticks(np.arange(min_col, max_col + 1))
232 ax.yaxis.set_ticks(np.arange(min_row, max_row + 1))
233 ax.set_ylim((max_row + 0.5, min_row - 0.5))
234
235 if self.plot_colorbar:
236 self._plot_colorbar(mesh, ax)
237
238 if self.annot_map:
239 self._write_annotations(mesh, ax)
240
241 if show_plot:
242 fig.show()
243
244 return ax, mesh, value_table
245
246 def _plot_colorbar(
247 self, mappable: mpl.cm.ScalarMappable, ax: plt.Axes
248 ) -> mpl.colorbar.Colorbar:
249 """Plots the colorbar. Internal."""
250 colorbar_ax = axes_grid1.make_axes_locatable(ax).append_axes(
251 **self.colorbar_location_options
252 )
253 position = self.colorbar_location_options.get('position', 'right')
254 orien = 'vertical' if position in ('left', 'right') else 'horizontal'
255 colorbar = ax.figure.colorbar(
256 mappable, colorbar_ax, ax, orientation=orien, **self.colorbar_options
257 )
258 colorbar_ax.tick_params(axis='y', direction='out')
259 return colorbar
260
261 def _write_annotations(self, mesh: mpl_collections.Collection, ax: plt.Axes) -> None:
262 """Writes annotations to the center of cells. Internal."""
263 for path, facecolor in zip(mesh.get_paths(), mesh.get_facecolors()):
264 # Calculate the center of the cell, assuming that it is a square
265 # centered at (x=col, y=row).
266 vertices = path.vertices[:4]
267 row = int(round(np.mean([v[1] for v in vertices])))
268 col = int(round(np.mean([v[0] for v in vertices])))
269 annotation = self.annot_map.get((row, col), '')
270 if not annotation:
271 continue
272 face_luminance = relative_luminance(facecolor)
273 text_color = 'black' if face_luminance > 0.4 else 'white'
274 text_kwargs = dict(color=text_color, ha="center", va="center")
275 text_kwargs.update(self.annot_kwargs)
276 ax.text(col, row, annotation, **text_kwargs)
277
[end of cirq/vis/heatmap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/vis/heatmap.py b/cirq/vis/heatmap.py
--- a/cirq/vis/heatmap.py
+++ b/cirq/vis/heatmap.py
@@ -38,7 +38,7 @@
if isinstance(qubit, grid_qubit.GridQubit):
return qubit.row, qubit.col
elif isinstance(qubit, tuple):
- return qubit[0], qubit[1]
+ return int(qubit[0]), int(qubit[1])
def relative_luminance(color: np.ndarray) -> float:
| {"golden_diff": "diff --git a/cirq/vis/heatmap.py b/cirq/vis/heatmap.py\n--- a/cirq/vis/heatmap.py\n+++ b/cirq/vis/heatmap.py\n@@ -38,7 +38,7 @@\n if isinstance(qubit, grid_qubit.GridQubit):\n return qubit.row, qubit.col\n elif isinstance(qubit, tuple):\n- return qubit[0], qubit[1]\n+ return int(qubit[0]), int(qubit[1])\n \n \n def relative_luminance(color: np.ndarray) -> float:\n", "issue": "Add to heatmap visualization tests\nIn the `test_colorbar` test there is a comment about testing that the position size and pad arguments are respected.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Heatmap class.\n\nSee examples/bristlecone_heatmap_example.py for an example usage in\nan interactive session.\n\"\"\"\n\nfrom typing import Any, Dict, List, Mapping, Optional, SupportsFloat, Tuple, Union\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import collections as mpl_collections\nfrom mpl_toolkits import axes_grid1\n\nfrom cirq.devices import grid_qubit\n\nQubitCoordinate = Union[Tuple[int, int], grid_qubit.GridQubit]\n\n# The value map is qubit coordinate -> a type that supports float conversion.\nValueMap = Union[Dict[grid_qubit.GridQubit, SupportsFloat], Dict[Tuple[int, int], SupportsFloat]]\n\n\ndef _get_qubit_row_col(qubit: QubitCoordinate) -> Tuple[int, int]:\n if isinstance(qubit, grid_qubit.GridQubit):\n return qubit.row, qubit.col\n elif isinstance(qubit, tuple):\n return qubit[0], qubit[1]\n\n\ndef relative_luminance(color: np.ndarray) -> float:\n \"\"\"Returns the relative luminance according to W3C specification.\n\n Spec: https://www.w3.org/TR/WCAG21/#dfn-relative-luminance.\n\n Args:\n color: a numpy array with the first 3 elements red, green, and blue\n with values in [0, 1].\n Returns:\n relative luminance of color in [0, 1].\n \"\"\"\n rgb = color[:3]\n rgb = np.where(rgb <= 0.03928, rgb / 12.92, ((rgb + 0.055) / 1.055) ** 2.4)\n return rgb.dot([0.2126, 0.7152, 0.0722])\n\n\nclass Heatmap:\n \"\"\"Distribution of a value in 2D qubit lattice as a color map.\"\"\"\n\n def __init__(self, value_map: ValueMap) -> None:\n self.set_value_map(value_map)\n self.annot_map = { # Default annotation.\n _get_qubit_row_col(qubit): format(float(value), '.2g')\n for qubit, value in value_map.items()\n }\n self.annot_kwargs: Dict[str, Any] = {}\n self.unset_url_map()\n self.set_colorbar()\n self.set_colormap()\n\n def set_value_map(self, value_map: ValueMap) -> 'Heatmap':\n \"\"\"Sets the values for each qubit.\n\n Args:\n value_map: the values for determining color for each cell.\n \"\"\"\n # Fail fast if float() fails.\n # Keep the original value object for annotation.\n self.value_map = {qubit: (float(value), value) for qubit, value in value_map.items()}\n return self\n\n def set_annotation_map(\n self, annot_map: Mapping[QubitCoordinate, str], **text_options: str\n ) -> 'Heatmap':\n \"\"\"Sets the annotation text for each 
qubit.\n\n Note that set_annotation_map() and set_annotation_format()\n both sets the annotation map to be used. Whichever is called later wins.\n\n Args:\n annot_map: the texts to be drawn on each qubit cell.\n text_options: keyword arguments passed to matplotlib.text.Text()\n when drawing the annotation texts.\n \"\"\"\n self.annot_map = {_get_qubit_row_col(qubit): value for qubit, value in annot_map.items()}\n self.annot_kwargs = text_options\n return self\n\n def set_annotation_format(self, annot_format: str, **text_options: str) -> 'Heatmap':\n \"\"\"Sets a format string to format values for each qubit.\n\n Args:\n annot_format: the format string for formatting values.\n text_options: keyword arguments to matplotlib.text.Text().\n \"\"\"\n self.annot_map = {\n _get_qubit_row_col(qubit): format(value[1], annot_format)\n for qubit, value in self.value_map.items()\n }\n self.annot_kwargs = text_options\n return self\n\n def unset_annotation(self) -> 'Heatmap':\n \"\"\"Disables annotation. No texts are shown in cells.\"\"\"\n self.annot_map = {}\n return self\n\n def set_url_map(self, url_map: Mapping[QubitCoordinate, str]) -> 'Heatmap':\n \"\"\"Sets the URLs for each cell.\"\"\"\n self.url_map = {_get_qubit_row_col(qubit): value for qubit, value in url_map.items()}\n return self\n\n def unset_url_map(self) -> 'Heatmap':\n \"\"\"Disables URL. No URLs are associated with cells.\"\"\"\n self.url_map = {}\n return self\n\n def set_colorbar(\n self, position: str = 'right', size: str = '5%', pad: str = '2%', **colorbar_options: Any\n ) -> 'Heatmap':\n \"\"\"Sets location and style of colorbar.\n\n Args:\n position: colorbar position, one of 'left'|'right'|'top'|'bottom'.\n size: a string ending in '%' to specify the width of the colorbar.\n Nominally, '100%' means the same width as the heatmap.\n pad: a string ending in '%' to specify the space between the\n colorbar and the heatmap.\n colorbar_options: keyword arguments passed to\n matplotlib.Figure.colorbar().\n \"\"\"\n self.plot_colorbar = True\n self.colorbar_location_options = {'position': position, 'size': size, 'pad': pad}\n self.colorbar_options = colorbar_options\n return self\n\n def unset_colorbar(self) -> 'Heatmap':\n \"\"\"Disables colorbar. No colorbar is drawn.\"\"\"\n self.plot_colorbar = False\n return self\n\n def set_colormap(\n self,\n colormap: Union[str, mpl.colors.Colormap] = 'viridis',\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> 'Heatmap':\n \"\"\"Sets the colormap.\n\n Args:\n colormap: either a colormap name or a Colormap instance.\n vmin: the minimum value to map to the minimum color. Default is\n the minimum value in value_map.\n vmax: the maximum value to map to the maximum color. Default is\n the maximum value in value_map.\n \"\"\"\n self.colormap = colormap\n self.vmin = vmin\n self.vmax = vmax\n return self\n\n def plot(\n self, ax: Optional[plt.Axes] = None, **pcolor_options: Any\n ) -> Tuple[plt.Axes, mpl_collections.Collection, pd.DataFrame]:\n \"\"\"Plots the heatmap on the given Axes.\n\n Args:\n ax: the Axes to plot on. If not given, a new figure is created,\n plotted on, and shown.\n pcolor_options: keyword arguments passed to ax.pcolor().\n\n Returns:\n A 3-tuple ``(ax, mesh, value_table)``. ``ax`` is the `plt.Axes` that\n is plotted on. 
``mesh`` is the collection of paths drawn and filled.\n ``value_table`` is the 2-D pandas DataFrame of values constructed\n from the value_map.\n \"\"\"\n show_plot = not ax\n if not ax:\n fig, ax = plt.subplots(figsize=(8, 8))\n # Find the boundary and size of the heatmap.\n coordinate_list = [_get_qubit_row_col(qubit) for qubit in self.value_map.keys()]\n rows = [row for row, _ in coordinate_list]\n cols = [col for _, col in coordinate_list]\n min_row, max_row = min(rows), max(rows)\n min_col, max_col = min(cols), max(cols)\n height, width = max_row - min_row + 1, max_col - min_col + 1\n # Construct the (height x width) table of values. Cells with no values\n # are filled with np.nan.\n value_table = pd.DataFrame(\n np.nan, index=range(min_row, max_row + 1), columns=range(min_col, max_col + 1)\n )\n for qubit, (float_value, _) in self.value_map.items():\n row, col = _get_qubit_row_col(qubit)\n value_table[col][row] = float_value\n # Construct the (height + 1) x (width + 1) cell boundary tables.\n x_table = np.array([np.arange(min_col - 0.5, max_col + 1.5)] * (height + 1))\n y_table = np.array([np.arange(min_row - 0.5, max_row + 1.5)] * (width + 1)).transpose()\n\n # Construct the URL array as an ordered list of URLs for non-nan cells.\n url_array: List[str] = []\n if self.url_map:\n url_array = [self.url_map.get((row, col), '') for row, col in value_table.stack().index]\n\n # Plot the heatmap.\n mesh = ax.pcolor(\n x_table,\n y_table,\n value_table,\n vmin=self.vmin,\n vmax=self.vmax,\n cmap=self.colormap,\n urls=url_array,\n **pcolor_options,\n )\n mesh.update_scalarmappable()\n ax.set(xlabel='column', ylabel='row')\n ax.xaxis.set_ticks(np.arange(min_col, max_col + 1))\n ax.yaxis.set_ticks(np.arange(min_row, max_row + 1))\n ax.set_ylim((max_row + 0.5, min_row - 0.5))\n\n if self.plot_colorbar:\n self._plot_colorbar(mesh, ax)\n\n if self.annot_map:\n self._write_annotations(mesh, ax)\n\n if show_plot:\n fig.show()\n\n return ax, mesh, value_table\n\n def _plot_colorbar(\n self, mappable: mpl.cm.ScalarMappable, ax: plt.Axes\n ) -> mpl.colorbar.Colorbar:\n \"\"\"Plots the colorbar. Internal.\"\"\"\n colorbar_ax = axes_grid1.make_axes_locatable(ax).append_axes(\n **self.colorbar_location_options\n )\n position = self.colorbar_location_options.get('position', 'right')\n orien = 'vertical' if position in ('left', 'right') else 'horizontal'\n colorbar = ax.figure.colorbar(\n mappable, colorbar_ax, ax, orientation=orien, **self.colorbar_options\n )\n colorbar_ax.tick_params(axis='y', direction='out')\n return colorbar\n\n def _write_annotations(self, mesh: mpl_collections.Collection, ax: plt.Axes) -> None:\n \"\"\"Writes annotations to the center of cells. Internal.\"\"\"\n for path, facecolor in zip(mesh.get_paths(), mesh.get_facecolors()):\n # Calculate the center of the cell, assuming that it is a square\n # centered at (x=col, y=row).\n vertices = path.vertices[:4]\n row = int(round(np.mean([v[1] for v in vertices])))\n col = int(round(np.mean([v[0] for v in vertices])))\n annotation = self.annot_map.get((row, col), '')\n if not annotation:\n continue\n face_luminance = relative_luminance(facecolor)\n text_color = 'black' if face_luminance > 0.4 else 'white'\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kwargs)\n ax.text(col, row, annotation, **text_kwargs)\n", "path": "cirq/vis/heatmap.py"}]} | 3,948 | 125 |
gh_patches_debug_19940 | rasdani/github-patches | git_diff | mirumee__ariadne-153 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise ValueError when `field` or `source` decorator was called incorrectly
Currently there's no error when the developer forgets to follow the `field` or `source` decorator with `("name")`, tricking them into thinking that the decorated function has been registered while in fact it wasn't.
We could update the implementation of those functions to raise a `ValueError` when the `name` attr is not a `str`.
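For illustration, a minimal sketch of the misuse this would catch (the schema and resolver names are made up for the example):

```python
from ariadne import QueryType

query = QueryType()

# Intended usage: the decorator is called with the field name first.
@query.field("hello")
def resolve_hello(obj, info):
    return "Hello!"

# Forgetting the ("name") call passes the resolver function itself as `name`,
# so nothing is registered; with the proposed check this would raise
# ValueError immediately instead of failing silently.
@query.field
def resolve_goodbye(obj, info):
    return "Goodbye!"
```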
</issue>
<code>
[start of ariadne/objects.py]
1 from typing import Callable, Dict, Optional, cast
2
3 from graphql.type import GraphQLNamedType, GraphQLObjectType, GraphQLSchema
4
5 from .resolvers import resolve_to
6 from .types import Resolver, SchemaBindable
7
8
9 class ObjectType(SchemaBindable):
10 _resolvers: Dict[str, Resolver]
11
12 def __init__(self, name: str) -> None:
13 self.name = name
14 self._resolvers = {}
15
16 def field(self, name: str) -> Callable[[Resolver], Resolver]:
17 return self.create_register_resolver(name)
18
19 def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:
20 def register_resolver(f: Resolver) -> Resolver:
21 self._resolvers[name] = f
22 return f
23
24 return register_resolver
25
26 def set_field(self, name, resolver: Resolver) -> Resolver:
27 self._resolvers[name] = resolver
28 return resolver
29
30 def set_alias(self, name: str, to: str) -> None:
31 self._resolvers[name] = resolve_to(to)
32
33 def bind_to_schema(self, schema: GraphQLSchema) -> None:
34 graphql_type = schema.type_map.get(self.name)
35 self.validate_graphql_type(graphql_type)
36 graphql_type = cast(GraphQLObjectType, graphql_type)
37 self.bind_resolvers_to_graphql_type(graphql_type)
38
39 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
40 if not graphql_type:
41 raise ValueError("Type %s is not defined in the schema" % self.name)
42 if not isinstance(graphql_type, GraphQLObjectType):
43 raise ValueError(
44 "%s is defined in the schema, but it is instance of %s (expected %s)"
45 % (self.name, type(graphql_type).__name__, GraphQLObjectType.__name__)
46 )
47
48 def bind_resolvers_to_graphql_type(self, graphql_type, replace_existing=True):
49 for field, resolver in self._resolvers.items():
50 if field not in graphql_type.fields:
51 raise ValueError(
52 "Field %s is not defined on type %s" % (field, self.name)
53 )
54 if graphql_type.fields[field].resolve is None or replace_existing:
55 graphql_type.fields[field].resolve = resolver
56
57
58 class QueryType(ObjectType):
59 """Convenience class for defining Query type"""
60
61 def __init__(self):
62 super().__init__("Query")
63
64
65 class MutationType(ObjectType):
66 """Convenience class for defining Mutation type"""
67
68 def __init__(self):
69 super().__init__("Mutation")
70
[end of ariadne/objects.py]
[start of ariadne/subscriptions.py]
1 from typing import Callable, Dict
2
3 from graphql.type import GraphQLSchema
4
5 from .objects import ObjectType
6 from .types import Subscriber
7
8
9 class SubscriptionType(ObjectType):
10 _subscribers: Dict[str, Subscriber]
11
12 def __init__(self) -> None:
13 super().__init__("Subscription")
14 self._subscribers = {}
15
16 def source(self, name: str) -> Callable[[Subscriber], Subscriber]:
17 return self.create_register_subscriber(name)
18
19 def create_register_subscriber(
20 self, name: str
21 ) -> Callable[[Subscriber], Subscriber]:
22 def register_subscriber(generator: Subscriber) -> Subscriber:
23 self._subscribers[name] = generator
24 return generator
25
26 return register_subscriber
27
28 def set_source(self, name, generator: Subscriber) -> Subscriber:
29 self._subscribers[name] = generator
30 return generator
31
32 def bind_to_schema(self, schema: GraphQLSchema) -> None:
33 graphql_type = schema.type_map.get(self.name)
34 self.validate_graphql_type(graphql_type)
35 self.bind_resolvers_to_graphql_type(graphql_type)
36 self.bind_subscribers_to_graphql_type(graphql_type)
37
38 def bind_subscribers_to_graphql_type(self, graphql_type):
39 for field, subscriber in self._subscribers.items():
40 if field not in graphql_type.fields:
41 raise ValueError(
42 "Field %s is not defined on type %s" % (field, self.name)
43 )
44
45 graphql_type.fields[field].subscribe = subscriber
46
[end of ariadne/subscriptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ariadne/objects.py b/ariadne/objects.py
--- a/ariadne/objects.py
+++ b/ariadne/objects.py
@@ -14,6 +14,10 @@
self._resolvers = {}
def field(self, name: str) -> Callable[[Resolver], Resolver]:
+ if not isinstance(name, str):
+ raise ValueError(
+ 'field decorator should be passed a field name: @foo.field("name")'
+ )
return self.create_register_resolver(name)
def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:
diff --git a/ariadne/subscriptions.py b/ariadne/subscriptions.py
--- a/ariadne/subscriptions.py
+++ b/ariadne/subscriptions.py
@@ -14,6 +14,10 @@
self._subscribers = {}
def source(self, name: str) -> Callable[[Subscriber], Subscriber]:
+ if not isinstance(name, str):
+ raise ValueError(
+ 'source decorator should be passed a field name: @foo.source("name")'
+ )
return self.create_register_subscriber(name)
def create_register_subscriber(
| {"golden_diff": "diff --git a/ariadne/objects.py b/ariadne/objects.py\n--- a/ariadne/objects.py\n+++ b/ariadne/objects.py\n@@ -14,6 +14,10 @@\n self._resolvers = {}\n \n def field(self, name: str) -> Callable[[Resolver], Resolver]:\n+ if not isinstance(name, str):\n+ raise ValueError(\n+ 'field decorator should be passed a field name: @foo.field(\"name\")'\n+ )\n return self.create_register_resolver(name)\n \n def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:\ndiff --git a/ariadne/subscriptions.py b/ariadne/subscriptions.py\n--- a/ariadne/subscriptions.py\n+++ b/ariadne/subscriptions.py\n@@ -14,6 +14,10 @@\n self._subscribers = {}\n \n def source(self, name: str) -> Callable[[Subscriber], Subscriber]:\n+ if not isinstance(name, str):\n+ raise ValueError(\n+ 'source decorator should be passed a field name: @foo.source(\"name\")'\n+ )\n return self.create_register_subscriber(name)\n \n def create_register_subscriber(\n", "issue": "Raise ValueError when `field` or `source` decorator was called incorrectly\nCurrently there's no error when the developer forgets to follow the `field` or `source` decorator with `(\"name\")`, tricking them into thinking that decorated function has been registered while in fact it wasn't.\r\n\r\nWe could update implementation for those functions to raise ValueError when `name` attr is not `str`.\n", "before_files": [{"content": "from typing import Callable, Dict, Optional, cast\n\nfrom graphql.type import GraphQLNamedType, GraphQLObjectType, GraphQLSchema\n\nfrom .resolvers import resolve_to\nfrom .types import Resolver, SchemaBindable\n\n\nclass ObjectType(SchemaBindable):\n _resolvers: Dict[str, Resolver]\n\n def __init__(self, name: str) -> None:\n self.name = name\n self._resolvers = {}\n\n def field(self, name: str) -> Callable[[Resolver], Resolver]:\n return self.create_register_resolver(name)\n\n def create_register_resolver(self, name: str) -> Callable[[Resolver], Resolver]:\n def register_resolver(f: Resolver) -> Resolver:\n self._resolvers[name] = f\n return f\n\n return register_resolver\n\n def set_field(self, name, resolver: Resolver) -> Resolver:\n self._resolvers[name] = resolver\n return resolver\n\n def set_alias(self, name: str, to: str) -> None:\n self._resolvers[name] = resolve_to(to)\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLObjectType, graphql_type)\n self.bind_resolvers_to_graphql_type(graphql_type)\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Type %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLObjectType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLObjectType.__name__)\n )\n\n def bind_resolvers_to_graphql_type(self, graphql_type, replace_existing=True):\n for field, resolver in self._resolvers.items():\n if field not in graphql_type.fields:\n raise ValueError(\n \"Field %s is not defined on type %s\" % (field, self.name)\n )\n if graphql_type.fields[field].resolve is None or replace_existing:\n graphql_type.fields[field].resolve = resolver\n\n\nclass QueryType(ObjectType):\n \"\"\"Convenience class for defining Query type\"\"\"\n\n def __init__(self):\n super().__init__(\"Query\")\n\n\nclass MutationType(ObjectType):\n \"\"\"Convenience class for 
defining Mutation type\"\"\"\n\n def __init__(self):\n super().__init__(\"Mutation\")\n", "path": "ariadne/objects.py"}, {"content": "from typing import Callable, Dict\n\nfrom graphql.type import GraphQLSchema\n\nfrom .objects import ObjectType\nfrom .types import Subscriber\n\n\nclass SubscriptionType(ObjectType):\n _subscribers: Dict[str, Subscriber]\n\n def __init__(self) -> None:\n super().__init__(\"Subscription\")\n self._subscribers = {}\n\n def source(self, name: str) -> Callable[[Subscriber], Subscriber]:\n return self.create_register_subscriber(name)\n\n def create_register_subscriber(\n self, name: str\n ) -> Callable[[Subscriber], Subscriber]:\n def register_subscriber(generator: Subscriber) -> Subscriber:\n self._subscribers[name] = generator\n return generator\n\n return register_subscriber\n\n def set_source(self, name, generator: Subscriber) -> Subscriber:\n self._subscribers[name] = generator\n return generator\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n self.bind_resolvers_to_graphql_type(graphql_type)\n self.bind_subscribers_to_graphql_type(graphql_type)\n\n def bind_subscribers_to_graphql_type(self, graphql_type):\n for field, subscriber in self._subscribers.items():\n if field not in graphql_type.fields:\n raise ValueError(\n \"Field %s is not defined on type %s\" % (field, self.name)\n )\n\n graphql_type.fields[field].subscribe = subscriber\n", "path": "ariadne/subscriptions.py"}]} | 1,714 | 270 |
gh_patches_debug_1362 | rasdani/github-patches | git_diff | UTNkar__moore-59 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login is per-subdomain
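For illustration, a sketch of the Django session settings that usually make a login shared across subdomains instead of per-subdomain (the domain value is just an example):

```python
# settings/production.py (sketch)
# Scope the session cookie to the parent domain so every *.utn.se subdomain
# sees the same login session instead of getting its own.
SESSION_COOKIE_DOMAIN = '.utn.se'

# Only send the session and CSRF cookies over HTTPS.
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
```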
</issue>
<code>
[start of website/website/settings/production.py]
1 """
2 Django settings for the production environment of Project Moore.
3
4 For more information regarding running in production see,
5 See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
6
7 For more information on this file, see
8 https://docs.djangoproject.com/en/1.10/topics/settings/
9
10 For the full list of settings and their values, see
11 https://docs.djangoproject.com/en/1.10/ref/settings/
12 """
13 from __future__ import absolute_import, unicode_literals
14
15 from .base import *
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = False
19
20 # SECURITY WARNING: keep the secret key used in production secret!
21 SECRET_KEY = os.environ.get(
22 'DJANGO_SECRET',
23 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'
24 )
25
26 # Database
27 # https://docs.djangoproject.com/en/1.10/ref/settings/#databases
28
29 DATABASES = {
30 'default': {
31 'ENGINE': 'django.db.backends.postgresql',
32 'NAME': os.environ.get('DJANGO_DB'),
33 'USER': os.environ.get('DJANGO_DB_USER'),
34 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),
35 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),
36 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),
37 }
38 }
39
40 # CONN_MAX_AGE = 0
41
42 # Base URL to use when referring to full URLs within the Wagtail admin
43 # backend - e.g. in notification emails. Don't include '/admin' or a
44 # trailing slash
45 BASE_URL = 'https://dev.utn.se'
46
47 ALLOWED_HOSTS = ['.utn.se']
48
49 # Email settings
50 DEFAULT_FROM_EMAIL = '[email protected]'
51
52 EMAIL_SUBJECT_PREFIX = '[UTN] '
53
54 # Admins - will be sent error messages
55 ADMINS = [('UTN System Administrator', '[email protected]')]
56
57 LOGGING_CONFIG = None
58
59 # TODO: HTTPS security
60 # CSRF_COOKIE_SECURE = True
61 #
62 # SESSION_COOKIE_SECURE = True
63
64 # Membership API
65 MEMBERSHIP_API_USER = 'moore'
66 MEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')
67
68 try:
69 from .local import *
70 except ImportError:
71 pass
72
[end of website/website/settings/production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/website/settings/production.py b/website/website/settings/production.py
--- a/website/website/settings/production.py
+++ b/website/website/settings/production.py
@@ -56,10 +56,11 @@
LOGGING_CONFIG = None
-# TODO: HTTPS security
-# CSRF_COOKIE_SECURE = True
-#
-# SESSION_COOKIE_SECURE = True
+CSRF_COOKIE_SECURE = True
+
+SESSION_COOKIE_DOMAIN = '.utn.se'
+
+SESSION_COOKIE_SECURE = True
# Membership API
MEMBERSHIP_API_USER = 'moore'
| {"golden_diff": "diff --git a/website/website/settings/production.py b/website/website/settings/production.py\n--- a/website/website/settings/production.py\n+++ b/website/website/settings/production.py\n@@ -56,10 +56,11 @@\n \n LOGGING_CONFIG = None\n \n-# TODO: HTTPS security\n-# CSRF_COOKIE_SECURE = True\n-#\n-# SESSION_COOKIE_SECURE = True\n+CSRF_COOKIE_SECURE = True\n+\n+SESSION_COOKIE_DOMAIN = '.utn.se'\n+\n+SESSION_COOKIE_SECURE = True\n \n # Membership API\n MEMBERSHIP_API_USER = 'moore'\n", "issue": "Login is per-subdomain\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for the production environment of Project Moore.\n\nFor more information regarding running in production see,\nSee https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'DJANGO_SECRET',\n 'za7^0@54n&p-dg4)_l12q_3^o5awz_uym0osqaz2!myki_8kw0'\n)\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.environ.get('DJANGO_DB'),\n 'USER': os.environ.get('DJANGO_DB_USER'),\n 'PASSWORD': os.environ.get('DJANGO_DB_PASS'),\n 'HOST': os.environ.get('DJANGO_DB_HOST', '127.0.0.1'),\n 'PORT': os.environ.get('DJANGO_DB_PORT', '5432'),\n }\n}\n\n# CONN_MAX_AGE = 0\n\n# Base URL to use when referring to full URLs within the Wagtail admin\n# backend - e.g. in notification emails. Don't include '/admin' or a\n# trailing slash\nBASE_URL = 'https://dev.utn.se'\n\nALLOWED_HOSTS = ['.utn.se']\n\n# Email settings\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nEMAIL_SUBJECT_PREFIX = '[UTN] '\n\n# Admins - will be sent error messages\nADMINS = [('UTN System Administrator', '[email protected]')]\n\nLOGGING_CONFIG = None\n\n# TODO: HTTPS security\n# CSRF_COOKIE_SECURE = True\n#\n# SESSION_COOKIE_SECURE = True\n\n# Membership API\nMEMBERSHIP_API_USER = 'moore'\nMEMBERSHIP_API_PASSWORD = os.environ.get('MEMBERSHIP_API_PASSWORD')\n\ntry:\n from .local import *\nexcept ImportError:\n pass\n", "path": "website/website/settings/production.py"}]} | 1,206 | 131 |
gh_patches_debug_3121 | rasdani/github-patches | git_diff | mirumee__ariadne-961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Starlette 0.21.0
Starlette 0.21.0 fixes important issues on the BaseHttpMiddleware side.
https://github.com/encode/starlette/pull/1715
https://github.com/tiangolo/fastapi/issues/4544
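On the Ariadne side this is presumably just a matter of relaxing the upper bound of the Starlette pin in `setup.py`, along these lines:

```python
install_requires=[
    "graphql-core>=3.2.0,<3.3",
    # Allow Starlette 0.21.x (and later 0.x releases) instead of capping at <0.21.
    "starlette>0.17,<1.0",
    "typing_extensions>=3.6.0",
],
```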
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.7",
12 "Programming Language :: Python :: 3.8",
13 "Programming Language :: Python :: 3.9",
14 "Programming Language :: Python :: 3.10",
15 "Topic :: Software Development :: Libraries :: Python Modules",
16 ]
17
18 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
19 with open(README_PATH, "r", encoding="utf8") as f:
20 README = f.read()
21
22 setup(
23 name="ariadne",
24 author="Mirumee Software",
25 author_email="[email protected]",
26 description="Ariadne is a Python library for implementing GraphQL servers.",
27 long_description=README,
28 long_description_content_type="text/markdown",
29 license="BSD",
30 version="0.16.1",
31 url="https://github.com/mirumee/ariadne",
32 packages=["ariadne"],
33 include_package_data=True,
34 install_requires=[
35 "graphql-core>=3.2.0,<3.3",
36 "starlette>0.17,<0.21",
37 "typing_extensions>=3.6.0",
38 ],
39 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
40 classifiers=CLASSIFIERS,
41 platforms=["any"],
42 zip_safe=False,
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette>0.17,<0.21",
+ "starlette>0.17,<1.0",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n- \"starlette>0.17,<0.21\",\n+ \"starlette>0.17,<1.0\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n", "issue": "Support Starlette 0.21.0\nStarlette 0.21.0 fix important issues on the BaseHttpMiddleware side. \r\n\r\nhttps://github.com/encode/starlette/pull/1715\r\nhttps://github.com/tiangolo/fastapi/issues/4544\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.16.1\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette>0.17,<0.21\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,033 | 123 |
gh_patches_debug_14624 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2426 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only show source codename on first session
# Feature request
## Description
A source is shown their codename on the `/lookup` page to help them remember it on their first session. This hint should not be shown again after the source logs out. The reason is that, in the future, we may remove the actual codename from the cookie to prevent it from being used to log in indefinitely (either by encrypting it or using some uuid that maps to the codename per session). If we show this on subsequent logins, an attacker could steal the cookie, not learn the codename, but log in, see the codename, and then log in again indefinitely.
Plus, if a source has logged in successfully, they definitely know their codename, and there is little (nothing?) to be gained by showing it again.
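One lightweight way to do this (sketched below; the `new_user` flag name is just an assumption) is to set a one-off flag in the Flask session when the codename is first generated and only render the hint while that flag is present — `session.clear()` on logout removes it for good:

```python
from flask import session, render_template

def mark_new_source(codename):
    # Called right after the codename is generated for a brand-new source.
    session['codename'] = codename
    session['new_user'] = True

def render_lookup(source):
    # Returning sources never get the flag (it is cleared with the rest of
    # the session on logout), so the template can hide the codename hint.
    return render_template(
        'lookup.html',
        codename=session['codename'],
        flagged=source.flagged,
        new_user=session.get('new_user', False),
    )
```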
</issue>
<code>
[start of securedrop/source_app/main.py]
1 import operator
2 import os
3
4 from datetime import datetime
5 from flask import (Blueprint, render_template, flash, redirect, url_for, g,
6 session, current_app, request, Markup, abort)
7 from flask_babel import gettext
8 from sqlalchemy.exc import IntegrityError
9
10 import crypto_util
11 import store
12
13 from db import Source, db_session, Submission, Reply, get_one_or_else
14 from rm import srm
15 from source_app.decorators import login_required
16 from source_app.utils import (logged_in, generate_unique_codename,
17 async_genkey, normalize_timestamps,
18 valid_codename)
19 from source_app.forms import LoginForm
20
21
22 def make_blueprint(config):
23 view = Blueprint('main', __name__)
24
25 @view.route('/')
26 def index():
27 return render_template('index.html')
28
29 @view.route('/generate', methods=('GET', 'POST'))
30 def generate():
31 if logged_in():
32 flash(gettext(
33 "You were redirected because you are already logged in. "
34 "If you want to create a new account, you should log out "
35 "first."),
36 "notification")
37 return redirect(url_for('.lookup'))
38
39 codename = generate_unique_codename()
40 session['codename'] = codename
41 return render_template('generate.html', codename=codename)
42
43 @view.route('/create', methods=['POST'])
44 def create():
45 filesystem_id = crypto_util.hash_codename(session['codename'])
46
47 source = Source(filesystem_id, crypto_util.display_id())
48 db_session.add(source)
49 try:
50 db_session.commit()
51 except IntegrityError as e:
52 db_session.rollback()
53 current_app.logger.error(
54 "Attempt to create a source with duplicate codename: %s" %
55 (e,))
56 else:
57 os.mkdir(store.path(filesystem_id))
58
59 session['logged_in'] = True
60 return redirect(url_for('.lookup'))
61
62 @view.route('/lookup', methods=('GET',))
63 @login_required
64 def lookup():
65 replies = []
66 for reply in g.source.replies:
67 reply_path = store.path(g.filesystem_id, reply.filename)
68 try:
69 reply.decrypted = crypto_util.decrypt(
70 g.codename,
71 open(reply_path).read()).decode('utf-8')
72 except UnicodeDecodeError:
73 current_app.logger.error("Could not decode reply %s" %
74 reply.filename)
75 else:
76 reply.date = datetime.utcfromtimestamp(
77 os.stat(reply_path).st_mtime)
78 replies.append(reply)
79
80 # Sort the replies by date
81 replies.sort(key=operator.attrgetter('date'), reverse=True)
82
83 # Generate a keypair to encrypt replies from the journalist
84 # Only do this if the journalist has flagged the source as one
85 # that they would like to reply to. (Issue #140.)
86 if not crypto_util.getkey(g.filesystem_id) and g.source.flagged:
87 async_genkey(g.filesystem_id, g.codename)
88
89 return render_template(
90 'lookup.html',
91 codename=g.codename,
92 replies=replies,
93 flagged=g.source.flagged,
94 haskey=crypto_util.getkey(
95 g.filesystem_id))
96
97 @view.route('/submit', methods=('POST',))
98 @login_required
99 def submit():
100 msg = request.form['msg']
101 fh = request.files['fh']
102
103 # Don't submit anything if it was an "empty" submission. #878
104 if not (msg or fh):
105 flash(gettext(
106 "You must enter a message or choose a file to submit."),
107 "error")
108 return redirect(url_for('main.lookup'))
109
110 fnames = []
111 journalist_filename = g.source.journalist_filename
112 first_submission = g.source.interaction_count == 0
113
114 if msg:
115 g.source.interaction_count += 1
116 fnames.append(
117 store.save_message_submission(
118 g.filesystem_id,
119 g.source.interaction_count,
120 journalist_filename,
121 msg))
122 if fh:
123 g.source.interaction_count += 1
124 fnames.append(
125 store.save_file_submission(
126 g.filesystem_id,
127 g.source.interaction_count,
128 journalist_filename,
129 fh.filename,
130 fh.stream))
131
132 if first_submission:
133 msg = render_template('first_submission_flashed_message.html')
134 flash(Markup(msg), "success")
135
136 else:
137 if msg and not fh:
138 html_contents = gettext('Thanks! We received your message.')
139 elif not msg and fh:
140 html_contents = gettext('Thanks! We received your document.')
141 else:
142 html_contents = gettext('Thanks! We received your message and '
143 'document.')
144
145 msg = render_template('next_submission_flashed_message.html',
146 html_contents=html_contents)
147 flash(Markup(msg), "success")
148
149 for fname in fnames:
150 submission = Submission(g.source, fname)
151 db_session.add(submission)
152
153 if g.source.pending:
154 g.source.pending = False
155
156 # Generate a keypair now, if there's enough entropy (issue #303)
157 entropy_avail = int(
158 open('/proc/sys/kernel/random/entropy_avail').read())
159 if entropy_avail >= 2400:
160 async_genkey(g.filesystem_id, g.codename)
161
162 g.source.last_updated = datetime.utcnow()
163 db_session.commit()
164 normalize_timestamps(g.filesystem_id)
165
166 return redirect(url_for('main.lookup'))
167
168 @view.route('/delete', methods=('POST',))
169 @login_required
170 def delete():
171 query = Reply.query.filter(
172 Reply.filename == request.form['reply_filename'])
173 reply = get_one_or_else(query, current_app.logger, abort)
174 srm(store.path(g.filesystem_id, reply.filename))
175 db_session.delete(reply)
176 db_session.commit()
177
178 flash(gettext("Reply deleted"), "notification")
179 return redirect(url_for('.lookup'))
180
181 @view.route('/delete-all', methods=('POST',))
182 @login_required
183 def batch_delete():
184 replies = g.source.replies
185 if len(replies) == 0:
186 current_app.logger.error("Found no replies when at least one was "
187 "expected")
188 return redirect(url_for('.lookup'))
189
190 for reply in replies:
191 srm(store.path(g.filesystem_id, reply.filename))
192 db_session.delete(reply)
193 db_session.commit()
194
195 flash(gettext("All replies have been deleted"), "notification")
196 return redirect(url_for('.lookup'))
197
198 @view.route('/login', methods=('GET', 'POST'))
199 def login():
200 form = LoginForm()
201 if form.validate_on_submit():
202 codename = request.form['codename'].strip()
203 if valid_codename(codename):
204 session.update(codename=codename, logged_in=True)
205 return redirect(url_for('.lookup', from_login='1'))
206 else:
207 current_app.logger.info(
208 "Login failed for invalid codename".format(codename))
209 flash(gettext("Sorry, that is not a recognized codename."),
210 "error")
211 return render_template('login.html', form=form)
212
213 @view.route('/logout')
214 def logout():
215 if logged_in():
216 session.clear()
217 msg = render_template('logout_flashed_message.html')
218 flash(Markup(msg), "important hide-if-not-tor-browser")
219 return redirect(url_for('.index'))
220
221 return view
222
[end of securedrop/source_app/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -38,6 +38,7 @@
codename = generate_unique_codename()
session['codename'] = codename
+ session['new_user'] = True
return render_template('generate.html', codename=codename)
@view.route('/create', methods=['POST'])
@@ -91,6 +92,7 @@
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
+ new_user=session.get('new_user', None),
haskey=crypto_util.getkey(
g.filesystem_id))
| {"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -38,6 +38,7 @@\n \n codename = generate_unique_codename()\n session['codename'] = codename\n+ session['new_user'] = True\n return render_template('generate.html', codename=codename)\n \n @view.route('/create', methods=['POST'])\n@@ -91,6 +92,7 @@\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n+ new_user=session.get('new_user', None),\n haskey=crypto_util.getkey(\n g.filesystem_id))\n", "issue": "Only show source codename on first session\n# Feature request\r\n\r\n## Description\r\n\r\nA source is shown their codename on the `/lookup` page to help them remember it on their first session. This hint should not be shown again after the source logs out. The reason being is in the future, we maybe remove the actual codename from the cookie to prevent it from being used to login indefinitely (either by encrypting it or using some uuid that maps to the codename per session). If we show this on subsequent logins, an attack could steal the cookie, not learn the codename, but login see the codename, and then login again indefinitely.\r\n\r\nPlus, is a source has logged in successfully, they definitely know their codename, and there is little (nothing?) to be gained by showing it again.\n", "before_files": [{"content": "import operator\nimport os\n\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport crypto_util\nimport store\n\nfrom db import Source, db_session, Submission, Reply, get_one_or_else\nfrom rm import srm\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. 
\"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename()\n session['codename'] = codename\n return render_template('generate.html', codename=codename)\n\n @view.route('/create', methods=['POST'])\n def create():\n filesystem_id = crypto_util.hash_codename(session['codename'])\n\n source = Source(filesystem_id, crypto_util.display_id())\n db_session.add(source)\n try:\n db_session.commit()\n except IntegrityError as e:\n db_session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n else:\n os.mkdir(store.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n for reply in g.source.replies:\n reply_path = store.path(g.filesystem_id, reply.filename)\n try:\n reply.decrypted = crypto_util.decrypt(\n g.codename,\n open(reply_path).read()).decode('utf-8')\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.filesystem_id) and g.source.flagged:\n async_genkey(g.filesystem_id, g.codename)\n\n return render_template(\n 'lookup.html',\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n haskey=crypto_util.getkey(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n msg = request.form['msg']\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n store.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n store.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! 
We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n entropy_avail = int(\n open('/proc/sys/kernel/random/entropy_avail').read())\n if entropy_avail >= 2400:\n async_genkey(g.filesystem_id, g.codename)\n\n g.source.last_updated = datetime.utcnow()\n db_session.commit()\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n query = Reply.query.filter(\n Reply.filename == request.form['reply_filename'])\n reply = get_one_or_else(query, current_app.logger, abort)\n srm(store.path(g.filesystem_id, reply.filename))\n db_session.delete(reply)\n db_session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = g.source.replies\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n srm(store.path(g.filesystem_id, reply.filename))\n db_session.delete(reply)\n db_session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\".format(codename))\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n session.clear()\n msg = render_template('logout_flashed_message.html')\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]} | 2,835 | 166 |
gh_patches_debug_5839 | rasdani/github-patches | git_diff | boto__boto-2116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In 2.25.0 GoogleStorage generate_url() returns url with AWSAccessKeyId instead of GoogleAccessId
When generating a Google Storage signed url:
``` python
conn = boto.connect_gs(gs_access_key, gs_secret_access)
signed_url = conn.generate_url(900, 'GET', bucket=bucket_name, key=file_name)
```
signed_url is of the form:
https://bucket_name.storage.googleapis.com/file_name?Signature=foo&Expires=1392808714&**AWSAccessKeyId**=gs_access_key
But it should be of the form:
https://bucket_name.storage.googleapis.com/file_name?Signature=foo&Expires=1392808714&**GoogleAccessId**=gs_access_key
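The offending parameter name comes from the query-string template on the GS connection class (see `boto/gs/connection.py` below), so the fix is presumably a one-line rename there:

```python
class GSConnection(S3Connection):

    DefaultHost = 'storage.googleapis.com'
    # Google Cloud Storage signed URLs identify the signer with GoogleAccessId,
    # not the S3-style AWSAccessKeyId.
    QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'
```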
</issue>
<code>
[start of boto/gs/connection.py]
1 # Copyright 2010 Google Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 from boto.gs.bucket import Bucket
23 from boto.s3.connection import S3Connection
24 from boto.s3.connection import SubdomainCallingFormat
25 from boto.s3.connection import check_lowercase_bucketname
26 from boto.utils import get_utf8_value
27
28 class Location(object):
29 DEFAULT = 'US'
30 EU = 'EU'
31
32 class GSConnection(S3Connection):
33
34 DefaultHost = 'storage.googleapis.com'
35 QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
36
37 def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
38 is_secure=True, port=None, proxy=None, proxy_port=None,
39 proxy_user=None, proxy_pass=None,
40 host=DefaultHost, debug=0, https_connection_factory=None,
41 calling_format=SubdomainCallingFormat(), path='/',
42 suppress_consec_slashes=True):
43 super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
44 is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
45 host, debug, https_connection_factory, calling_format, path,
46 "google", Bucket,
47 suppress_consec_slashes=suppress_consec_slashes)
48
49 def create_bucket(self, bucket_name, headers=None,
50 location=Location.DEFAULT, policy=None,
51 storage_class='STANDARD'):
52 """
53 Creates a new bucket. By default it's located in the USA. You can
54 pass Location.EU to create bucket in the EU. You can also pass
55 a LocationConstraint for where the bucket should be located, and
56 a StorageClass describing how the data should be stored.
57
58 :type bucket_name: string
59 :param bucket_name: The name of the new bucket.
60
61 :type headers: dict
62 :param headers: Additional headers to pass along with the request to GCS.
63
64 :type location: :class:`boto.gs.connection.Location`
65 :param location: The location of the new bucket.
66
67 :type policy: :class:`boto.gs.acl.CannedACLStrings`
68 :param policy: A canned ACL policy that will be applied to the new key
69 in GCS.
70
71 :type storage_class: string
72 :param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.
73
74 """
75 check_lowercase_bucketname(bucket_name)
76
77 if policy:
78 if headers:
79 headers[self.provider.acl_header] = policy
80 else:
81 headers = {self.provider.acl_header : policy}
82 if not location:
83 location = Location.DEFAULT
84 location_elem = ('<LocationConstraint>%s</LocationConstraint>'
85 % location)
86 if storage_class:
87 storage_class_elem = ('<StorageClass>%s</StorageClass>'
88 % storage_class)
89 else:
90 storage_class_elem = ''
91 data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
92 % (location_elem, storage_class_elem))
93 response = self.make_request(
94 'PUT', get_utf8_value(bucket_name), headers=headers,
95 data=get_utf8_value(data))
96 body = response.read()
97 if response.status == 409:
98 raise self.provider.storage_create_error(
99 response.status, response.reason, body)
100 if response.status == 200:
101 return self.bucket_class(self, bucket_name)
102 else:
103 raise self.provider.storage_response_error(
104 response.status, response.reason, body)
105
106 def get_bucket(self, bucket_name, validate=True, headers=None):
107 """
108 Retrieves a bucket by name.
109
110 If the bucket does not exist, an ``S3ResponseError`` will be raised. If
111 you are unsure if the bucket exists or not, you can use the
112 ``S3Connection.lookup`` method, which will either return a valid bucket
113 or ``None``.
114
115 :type bucket_name: string
116 :param bucket_name: The name of the bucket
117
118 :type headers: dict
119 :param headers: Additional headers to pass along with the request to
120 AWS.
121
122 :type validate: boolean
123 :param validate: If ``True``, it will try to fetch all keys within the
124 given bucket. (Default: ``True``)
125 """
126 bucket = self.bucket_class(self, bucket_name)
127 if validate:
128 bucket.get_all_keys(headers, maxkeys=0)
129 return bucket
130
[end of boto/gs/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/boto/gs/connection.py b/boto/gs/connection.py
--- a/boto/gs/connection.py
+++ b/boto/gs/connection.py
@@ -32,7 +32,7 @@
class GSConnection(S3Connection):
DefaultHost = 'storage.googleapis.com'
- QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
+ QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'
def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
| {"golden_diff": "diff --git a/boto/gs/connection.py b/boto/gs/connection.py\n--- a/boto/gs/connection.py\n+++ b/boto/gs/connection.py\n@@ -32,7 +32,7 @@\n class GSConnection(S3Connection):\n \n DefaultHost = 'storage.googleapis.com'\n- QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'\n+ QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'\n \n def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n", "issue": "In 2.25.0 GoogleStorage generate_url() returns url with AWSAccessKeyId instead of GoogleAccessId\nWhen generating a Google Storage signed url:\n\n``` python\nconn = boto.connect_gs(gs_access_key, gs_secret_access)\nsigned_url = conn.generate_url(900, 'GET', bucket=bucket_name, key=file_name)\n```\n\nsigned_url is of the form:\n\nhttps://bucket_name.storage.googleapis.com/file_name?Signature=foo&Expires=1392808714&**AWSAccessKeyId**=gs_access_key\n\nBut should be of the form:\n\nhttps://bucket_name.storage.googleapis.com/file_name?Signature=foo&Expires=1392808714&**GoogleAccessId**=gs_access_key\n\n", "before_files": [{"content": "# Copyright 2010 Google Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom boto.gs.bucket import Bucket\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.connection import SubdomainCallingFormat\nfrom boto.s3.connection import check_lowercase_bucketname\nfrom boto.utils import get_utf8_value\n\nclass Location(object):\n DEFAULT = 'US'\n EU = 'EU'\n\nclass GSConnection(S3Connection):\n\n DefaultHost = 'storage.googleapis.com'\n QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'\n\n def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None,\n host=DefaultHost, debug=0, https_connection_factory=None,\n calling_format=SubdomainCallingFormat(), path='/',\n suppress_consec_slashes=True):\n super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,\n is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,\n host, debug, https_connection_factory, calling_format, path,\n \"google\", Bucket,\n suppress_consec_slashes=suppress_consec_slashes)\n\n def create_bucket(self, bucket_name, headers=None,\n location=Location.DEFAULT, policy=None,\n storage_class='STANDARD'):\n \"\"\"\n Creates a new bucket. By default it's located in the USA. You can\n pass Location.EU to create bucket in the EU. 
You can also pass\n a LocationConstraint for where the bucket should be located, and\n a StorageClass describing how the data should be stored.\n\n :type bucket_name: string\n :param bucket_name: The name of the new bucket.\n\n :type headers: dict\n :param headers: Additional headers to pass along with the request to GCS.\n\n :type location: :class:`boto.gs.connection.Location`\n :param location: The location of the new bucket.\n\n :type policy: :class:`boto.gs.acl.CannedACLStrings`\n :param policy: A canned ACL policy that will be applied to the new key\n in GCS.\n\n :type storage_class: string\n :param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.\n\n \"\"\"\n check_lowercase_bucketname(bucket_name)\n\n if policy:\n if headers:\n headers[self.provider.acl_header] = policy\n else:\n headers = {self.provider.acl_header : policy}\n if not location:\n location = Location.DEFAULT\n location_elem = ('<LocationConstraint>%s</LocationConstraint>'\n % location)\n if storage_class:\n storage_class_elem = ('<StorageClass>%s</StorageClass>'\n % storage_class)\n else:\n storage_class_elem = ''\n data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'\n % (location_elem, storage_class_elem))\n response = self.make_request(\n 'PUT', get_utf8_value(bucket_name), headers=headers,\n data=get_utf8_value(data))\n body = response.read()\n if response.status == 409:\n raise self.provider.storage_create_error(\n response.status, response.reason, body)\n if response.status == 200:\n return self.bucket_class(self, bucket_name)\n else:\n raise self.provider.storage_response_error(\n response.status, response.reason, body)\n\n def get_bucket(self, bucket_name, validate=True, headers=None):\n \"\"\"\n Retrieves a bucket by name.\n\n If the bucket does not exist, an ``S3ResponseError`` will be raised. If\n you are unsure if the bucket exists or not, you can use the\n ``S3Connection.lookup`` method, which will either return a valid bucket\n or ``None``.\n\n :type bucket_name: string\n :param bucket_name: The name of the bucket\n\n :type headers: dict\n :param headers: Additional headers to pass along with the request to\n AWS.\n\n :type validate: boolean\n :param validate: If ``True``, it will try to fetch all keys within the\n given bucket. (Default: ``True``)\n \"\"\"\n bucket = self.bucket_class(self, bucket_name)\n if validate:\n bucket.get_all_keys(headers, maxkeys=0)\n return bucket\n", "path": "boto/gs/connection.py"}]} | 2,149 | 140 |
gh_patches_debug_20924 | rasdani/github-patches | git_diff | OCA__server-tools-211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tree_view_record_id: causes a warning in the logs
in the runbot logs I get:
2015-07-17 13:09:05,793 27838 WARNING 3110977-7-0-458127-all openerp.modules.loading: The model module.tree.view.record.id.installed has no access rules, consider adding one. E.g. access_module_tree_view_record_id_installed,access_module_tree_view_record_id_installed,model_module_tree_view_record_id_installed,,1,1,1,1
I tracked down module.tree.view.record.id.installed to tree_view_record_id
I totally don't understand why this pseudo-dynamic model gets such a weird generated name, but in any case an ACL is missing
</issue>
<code>
[start of sentry_logger/__init__.py]
1 # -*- coding: utf-8 -*-
2 ###############################################################################
3 #
4 # OpenERP, Open Source Management Solution
5 # This module copyright (C) 2010 - 2014 Savoir-faire Linux
6 # (<http://www.savoirfairelinux.com>).
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Affero General Public License as
10 # published by the Free Software Foundation, either version 3 of the
11 # License, or (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Affero General Public License for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public License
19 # along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 ###############################################################################
22
23 import logging
24 import cgitb
25
26 from openerp.tools import config
27 from openerp.addons.web.controllers.main import Session
28
29 _DEFAULT_LOGGING_LEVEL = logging.ERROR
30
31 try:
32 from .odoo_sentry_client import OdooClient
33 from .odoo_sentry_handler import OdooSentryHandler
34
35 root_logger = logging.root
36
37 processors = (
38 'raven.processors.SanitizePasswordsProcessor',
39 'raven_sanitize_openerp.OpenerpPasswordsProcessor'
40 )
41 if config.get(u'sentry_dsn'):
42 cgitb.enable()
43 # Get DSN info from config file or ~/.openerp_serverrc (recommended)
44 dsn = config.get('sentry_dsn')
45 try:
46 level = getattr(logging, config.get('sentry_logging_level'))
47 except (AttributeError, TypeError):
48 level = _DEFAULT_LOGGING_LEVEL
49 # Create Client
50 client = OdooClient(
51 dsn=dsn,
52 processors=processors,
53 )
54 handler = OdooSentryHandler(client, level=level)
55 root_logger.addHandler(handler)
56 else:
57 root_logger.warn(u"Sentry DSN not defined in config file")
58 client = None
59
60 # Inject sentry_activated to session to display error message or not
61 old_session_info = Session.session_info
62
63 def session_info(self, req):
64 res = old_session_info(self, req)
65 res['sentry_activated'] = bool(client)
66 return res
67
68 Session.session_info = session_info
69 except ImportError:
70 pass
71
[end of sentry_logger/__init__.py]
[start of tree_view_record_id/__openerp__.py]
1 # -*- coding: utf-8 -*-
2 ###############################################################################
3 #
4 # Copyright (C) 2012-TODAY Akretion <http://www.akretion.com>.
5 # All Rights Reserved
6 # @author David BEAL <[email protected]>
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #
20 ###############################################################################
21
22 {
23 'name': 'Tree View Record Id',
24 'version': '0.1',
25 'category': 'Other modules',
26 'sequence': 10,
27 'author': "Akretion,Odoo Community Association (OCA)",
28 'summary': "Adds id field to tree views",
29 'description': """
30 Adds Id field in all tree views of any modules/models, except:
31
32 * Arborescent tree views like 'Products by Category', 'Chart of accounts', etc.
33 * Tree views (like in wizard 'Change password') built on transient models
34 which don't have this column in their table.
35
36 Id field is the primary key of standard sql tables
37 defined by the orm (Odoo model).
38 """,
39 'website': 'http://www.akretion.com',
40 'depends': [
41 'base',
42 ],
43 'data': [
44 ],
45 'demo': [
46 ],
47 'installable': True,
48 'auto_install': False,
49 'application': False,
50 'images': [
51 ],
52 'css': [
53 ],
54 'js': [
55 ],
56 'qweb': [
57 ],
58 }
59
[end of tree_view_record_id/__openerp__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_logger/__init__.py b/sentry_logger/__init__.py
--- a/sentry_logger/__init__.py
+++ b/sentry_logger/__init__.py
@@ -20,6 +20,7 @@
#
###############################################################################
+import os
import logging
import cgitb
@@ -54,7 +55,12 @@
handler = OdooSentryHandler(client, level=level)
root_logger.addHandler(handler)
else:
- root_logger.warn(u"Sentry DSN not defined in config file")
+ msg = u"Sentry DSN not defined in config file"
+ if os.environ.get('OCA_CI'):
+ # don't fail the build on runbot for this
+ root_logger.info(msg)
+ else:
+ root_logger.warn(msg)
client = None
# Inject sentry_activated to session to display error message or not
diff --git a/tree_view_record_id/__openerp__.py b/tree_view_record_id/__openerp__.py
--- a/tree_view_record_id/__openerp__.py
+++ b/tree_view_record_id/__openerp__.py
@@ -41,6 +41,7 @@
'base',
],
'data': [
+ 'security/ir.model.access.csv',
],
'demo': [
],
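
The patch above addresses two separate runbot complaints: in `sentry_logger` the "Sentry DSN not defined in config file" warning is downgraded to an info message when the `OCA_CI` environment variable is set, so it no longer fails the build, and `tree_view_record_id` registers a `security/ir.model.access.csv` file for the helper model named in the warning. The CSV contents are not part of the diff; the sketch below is built from the access rule suggested in the warning quoted in the issue, so the ids and the empty group column are assumptions and the real file may differ:

```
id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
access_module_tree_view_record_id_installed,access_module_tree_view_record_id_installed,model_module_tree_view_record_id_installed,,1,1,1,1
```
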
| {"golden_diff": "diff --git a/sentry_logger/__init__.py b/sentry_logger/__init__.py\n--- a/sentry_logger/__init__.py\n+++ b/sentry_logger/__init__.py\n@@ -20,6 +20,7 @@\n #\n ###############################################################################\n \n+import os\n import logging\n import cgitb\n \n@@ -54,7 +55,12 @@\n handler = OdooSentryHandler(client, level=level)\n root_logger.addHandler(handler)\n else:\n- root_logger.warn(u\"Sentry DSN not defined in config file\")\n+ msg = u\"Sentry DSN not defined in config file\"\n+ if os.environ.get('OCA_CI'):\n+ # don't fail the build on runbot for this\n+ root_logger.info(msg)\n+ else:\n+ root_logger.warn(msg)\n client = None\n \n # Inject sentry_activated to session to display error message or not\ndiff --git a/tree_view_record_id/__openerp__.py b/tree_view_record_id/__openerp__.py\n--- a/tree_view_record_id/__openerp__.py\n+++ b/tree_view_record_id/__openerp__.py\n@@ -41,6 +41,7 @@\n 'base',\n ],\n 'data': [\n+ 'security/ir.model.access.csv',\n ],\n 'demo': [\n ],\n", "issue": "tree_view_record_id: causes a warning in the logs\nin the runbot logs I get:\n\n2015-07-17 13:09:05,793 27838 WARNING 3110977-7-0-458127-all openerp.modules.loading: The model module.tree.view.record.id.installed has no access rules, consider adding one. E.g. access_module_tree_view_record_id_installed,access_module_tree_view_record_id_installed,model_module_tree_view_record_id_installed,,1,1,1,1\n\nI tracked down module.tree.view.record.id.installed to tree_view_record_id\n\nI totally don't understand why the pseudo dynamic a weird name generated that way, but an ACL is missing\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# OpenERP, Open Source Management Solution\n# This module copyright (C) 2010 - 2014 Savoir-faire Linux\n# (<http://www.savoirfairelinux.com>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n###############################################################################\n\nimport logging\nimport cgitb\n\nfrom openerp.tools import config\nfrom openerp.addons.web.controllers.main import Session\n\n_DEFAULT_LOGGING_LEVEL = logging.ERROR\n\ntry:\n from .odoo_sentry_client import OdooClient\n from .odoo_sentry_handler import OdooSentryHandler\n\n root_logger = logging.root\n\n processors = (\n 'raven.processors.SanitizePasswordsProcessor',\n 'raven_sanitize_openerp.OpenerpPasswordsProcessor'\n )\n if config.get(u'sentry_dsn'):\n cgitb.enable()\n # Get DSN info from config file or ~/.openerp_serverrc (recommended)\n dsn = config.get('sentry_dsn')\n try:\n level = getattr(logging, config.get('sentry_logging_level'))\n except (AttributeError, TypeError):\n level = _DEFAULT_LOGGING_LEVEL\n # Create Client\n client = OdooClient(\n dsn=dsn,\n processors=processors,\n )\n handler = OdooSentryHandler(client, level=level)\n root_logger.addHandler(handler)\n else:\n root_logger.warn(u\"Sentry DSN not defined in config file\")\n client = None\n\n # Inject sentry_activated to session to display error message or not\n old_session_info = Session.session_info\n\n def session_info(self, req):\n res = old_session_info(self, req)\n res['sentry_activated'] = bool(client)\n return res\n\n Session.session_info = session_info\nexcept ImportError:\n pass\n", "path": "sentry_logger/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Copyright (C) 2012-TODAY Akretion <http://www.akretion.com>.\n# All Rights Reserved\n# @author David BEAL <[email protected]>\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n###############################################################################\n\n{\n 'name': 'Tree View Record Id',\n 'version': '0.1',\n 'category': 'Other modules',\n 'sequence': 10,\n 'author': \"Akretion,Odoo Community Association (OCA)\",\n 'summary': \"Adds id field to tree views\",\n 'description': \"\"\"\nAdds Id field in all tree views of any modules/models, except:\n\n* Arborescent tree views like 'Products by Category', 'Chart of accounts', etc.\n* Tree views (like in wizard 'Change password') built on transient models\n which don't have this column in their table.\n\nId field is the primary key of standard sql tables\ndefined by the orm (Odoo model).\n \"\"\",\n 'website': 'http://www.akretion.com',\n 'depends': [\n 'base',\n ],\n 'data': [\n ],\n 'demo': [\n ],\n 'installable': True,\n 'auto_install': False,\n 'application': False,\n 'images': [\n ],\n 'css': [\n ],\n 'js': [\n ],\n 'qweb': [\n ],\n}\n", "path": "tree_view_record_id/__openerp__.py"}]} | 1,987 | 293 |
gh_patches_debug_5906 | rasdani/github-patches | git_diff | mesonbuild__meson-2743 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
run_target Permission Denied error should be clearer
Minimal example -
[folder.zip](https://github.com/mesonbuild/meson/files/1530489/folder.zip)
I ran `mkdir build && cd build && meson .. && ninja` and everything worked. Now when I run `ninja myscript` it throws the following error:
```
[0/1] Running external command myscript.
Traceback (most recent call last):
File "/usr/bin/meson", line 37, in <module>
sys.exit(main())
File "/usr/bin/meson", line 34, in main
return mesonmain.run(sys.argv[1:], launcher)
File "/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py", line 311, in run
sys.exit(run_script_command(args[1:]))
File "/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py", line 278, in run_script_command
return cmdfunc(cmdargs)
File "/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py", line 60, in run
pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)
File "/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py", line 39, in run_command
return subprocess.Popen(command_array, env=child_env, cwd=cwd)
File "/usr/lib/python3.6/subprocess.py", line 709, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1344, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
PermissionError: [Errno 13] Permission denied: '/home/agauniyal/temp/scripts/script.sh'
FAILED: meson-myscript
/usr/bin/python /usr/bin/meson --internal commandrunner /home/agauniyal/temp/ /home/agauniyal/temp/build '' /usr/bin/python /usr/bin/meson /home/agauniyal/temp/scripts/script.sh
ninja: build stopped: subcommand failed.
```
</issue>
<code>
[start of mesonbuild/scripts/commandrunner.py]
1 # Copyright 2014 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This program is a wrapper to run external commands. It determines
16 what to run, sets up the environment and executes the command."""
17
18 import sys, os, subprocess, shutil, shlex
19
20 def run_command(source_dir, build_dir, subdir, meson_command, command, arguments):
21 env = {'MESON_SOURCE_ROOT': source_dir,
22 'MESON_BUILD_ROOT': build_dir,
23 'MESON_SUBDIR': subdir,
24 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),
25 }
26 cwd = os.path.join(source_dir, subdir)
27 child_env = os.environ.copy()
28 child_env.update(env)
29
30 # Is the command an executable in path?
31 exe = shutil.which(command)
32 if exe is not None:
33 command_array = [exe] + arguments
34 return subprocess.Popen(command_array, env=child_env, cwd=cwd)
35 # No? Maybe it is a script in the source tree.
36 fullpath = os.path.join(source_dir, subdir, command)
37 command_array = [fullpath] + arguments
38 try:
39 return subprocess.Popen(command_array, env=child_env, cwd=cwd)
40 except FileNotFoundError:
41 print('Could not execute command "%s".' % command)
42 sys.exit(1)
43
44 def run(args):
45 if len(args) < 4:
46 print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')
47 return 1
48 src_dir = args[0]
49 build_dir = args[1]
50 subdir = args[2]
51 meson_command = args[3]
52 if 'python' in meson_command: # Hack.
53 meson_command = [meson_command, args[4]]
54 command = args[5]
55 arguments = args[6:]
56 else:
57 meson_command = [meson_command]
58 command = args[4]
59 arguments = args[5:]
60 pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)
61 pc.wait()
62 return pc.returncode
63
64 if __name__ == '__main__':
65 sys.exit(run(sys.argv[1:]))
66
[end of mesonbuild/scripts/commandrunner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py
--- a/mesonbuild/scripts/commandrunner.py
+++ b/mesonbuild/scripts/commandrunner.py
@@ -38,7 +38,10 @@
try:
return subprocess.Popen(command_array, env=child_env, cwd=cwd)
except FileNotFoundError:
- print('Could not execute command "%s".' % command)
+ print('Could not execute command "%s". File not found.' % command)
+ sys.exit(1)
+ except PermissionError:
+ print('Could not execute command "%s". File not executable.' % command)
sys.exit(1)
def run(args):
| {"golden_diff": "diff --git a/mesonbuild/scripts/commandrunner.py b/mesonbuild/scripts/commandrunner.py\n--- a/mesonbuild/scripts/commandrunner.py\n+++ b/mesonbuild/scripts/commandrunner.py\n@@ -38,7 +38,10 @@\n try:\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n except FileNotFoundError:\n- print('Could not execute command \"%s\".' % command)\n+ print('Could not execute command \"%s\". File not found.' % command)\n+ sys.exit(1)\n+ except PermissionError:\n+ print('Could not execute command \"%s\". File not executable.' % command)\n sys.exit(1)\n \n def run(args):\n", "issue": "run_target Permission Denied error should be clearer\nMinimal example - \r\n[folder.zip](https://github.com/mesonbuild/meson/files/1530489/folder.zip)\r\n\r\nI ran - `mkdir build && cd build && meson .. && ninja` and everything works. Now I run - `ninja myscript` and it throws errors -\r\n\r\n```\r\n[0/1] Running external command myscript.\r\nTraceback (most recent call last):\r\n File \"/usr/bin/meson\", line 37, in <module>\r\n sys.exit(main())\r\n File \"/usr/bin/meson\", line 34, in main\r\n return mesonmain.run(sys.argv[1:], launcher)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py\", line 311, in run\r\n sys.exit(run_script_command(args[1:]))\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/mesonmain.py\", line 278, in run_script_command\r\n return cmdfunc(cmdargs)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py\", line 60, in run\r\n pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)\r\n File \"/usr/lib/python3.6/site-packages/mesonbuild/scripts/commandrunner.py\", line 39, in run_command\r\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 709, in __init__\r\n restore_signals, start_new_session)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 1344, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nPermissionError: [Errno 13] Permission denied: '/home/agauniyal/temp/scripts/script.sh'\r\nFAILED: meson-myscript \r\n/usr/bin/python /usr/bin/meson --internal commandrunner /home/agauniyal/temp/ /home/agauniyal/temp/build '' /usr/bin/python /usr/bin/meson /home/agauniyal/temp/scripts/script.sh\r\nninja: build stopped: subcommand failed.\r\n```\n", "before_files": [{"content": "# Copyright 2014 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This program is a wrapper to run external commands. 
It determines\nwhat to run, sets up the environment and executes the command.\"\"\"\n\nimport sys, os, subprocess, shutil, shlex\n\ndef run_command(source_dir, build_dir, subdir, meson_command, command, arguments):\n env = {'MESON_SOURCE_ROOT': source_dir,\n 'MESON_BUILD_ROOT': build_dir,\n 'MESON_SUBDIR': subdir,\n 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in meson_command + ['introspect']]),\n }\n cwd = os.path.join(source_dir, subdir)\n child_env = os.environ.copy()\n child_env.update(env)\n\n # Is the command an executable in path?\n exe = shutil.which(command)\n if exe is not None:\n command_array = [exe] + arguments\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n # No? Maybe it is a script in the source tree.\n fullpath = os.path.join(source_dir, subdir, command)\n command_array = [fullpath] + arguments\n try:\n return subprocess.Popen(command_array, env=child_env, cwd=cwd)\n except FileNotFoundError:\n print('Could not execute command \"%s\".' % command)\n sys.exit(1)\n\ndef run(args):\n if len(args) < 4:\n print('commandrunner.py <source dir> <build dir> <subdir> <command> [arguments]')\n return 1\n src_dir = args[0]\n build_dir = args[1]\n subdir = args[2]\n meson_command = args[3]\n if 'python' in meson_command: # Hack.\n meson_command = [meson_command, args[4]]\n command = args[5]\n arguments = args[6:]\n else:\n meson_command = [meson_command]\n command = args[4]\n arguments = args[5:]\n pc = run_command(src_dir, build_dir, subdir, meson_command, command, arguments)\n pc.wait()\n return pc.returncode\n\nif __name__ == '__main__':\n sys.exit(run(sys.argv[1:]))\n", "path": "mesonbuild/scripts/commandrunner.py"}]} | 1,747 | 151 |
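
The mesonbuild record above reduces to `subprocess.Popen` raising `PermissionError` when `run_target` points at a script that is not executable, and the accepted patch catches that exception alongside `FileNotFoundError`, printing a readable message instead of a traceback. Below is a self-contained sketch of the same pattern (not the upstream code verbatim; the `launch` name is invented for illustration):

```python
import subprocess
import sys

def launch(command_array, child_env, cwd):
    """Start a command, reporting unrunnable files instead of dumping a traceback."""
    try:
        return subprocess.Popen(command_array, env=child_env, cwd=cwd)
    except FileNotFoundError:
        print('Could not execute command "%s". File not found.' % command_array[0])
        sys.exit(1)
    except PermissionError:
        print('Could not execute command "%s". File not executable.' % command_array[0])
        sys.exit(1)
```

For the reporter's minimal example, `chmod +x scripts/script.sh` would also have avoided the crash, but the clearer error message is the actual fix.
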
gh_patches_debug_15991 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3079 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FileNotFoundError when running WoW in interactive
**Bug description**
I am running the following command: `parlai interactive -mf models:wizard_of_wikipedia/end2end_generator/model -t wizard_of_wikipedia`. At the step when "loading topics" there is a missing file error: `FileNotFoundError: [Errno 2] No such file or directory: '/mnt/disks/disk-huge/melody/parlai/data/wizard_of_wikipedia/topic_splits.json'`
**Reproduction steps**
Run `parlai interactive -mf models:wizard_of_wikipedia/end2end_generator/model -t wizard_of_wikipedia`
**Expected behavior**
The file is automatically downloaded, topics are loaded and the interactive session begins.
**Logs**
Please paste the command line output:
```
14:49:06 | creating task(s): wizard_of_wikipedia
[ loading topics.. ]
Traceback (most recent call last):
File "/home/melody/anaconda3/envs/parlai/bin/parlai", line 33, in <module>
sys.exit(load_entry_point('parlai', 'console_scripts', 'parlai')())
File "/mnt/disks/disk-huge/melody/parlai/parlai/core/script.py", line 272, in superscript_main
SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/mnt/disks/disk-huge/melody/parlai/parlai/core/script.py", line 88, in _run_from_parser_and_opt
return script.run()
File "/mnt/disks/disk-huge/melody/parlai/parlai/scripts/interactive.py", line 117, in run
return interactive(self.opt)
File "/mnt/disks/disk-huge/melody/parlai/parlai/scripts/interactive.py", line 88, in interactive
world = create_task(opt, [human_agent, agent])
File "/mnt/disks/disk-huge/melody/parlai/parlai/core/worlds.py", line 1262, in create_task
world = create_task_world(opt, user_agents, default_world=default_world)
File "/mnt/disks/disk-huge/melody/parlai/parlai/core/worlds.py", line 1235, in create_task_world
return world_class(opt, task_agents + user_agents)
File "/mnt/disks/disk-huge/melody/parlai/parlai/tasks/wizard_of_wikipedia/worlds.py", line 53, in __init__
self._load_topics(opt)
File "/mnt/disks/disk-huge/melody/parlai/parlai/tasks/wizard_of_wikipedia/worlds.py", line 82, in _load_topics
self.topic_list = json.load(open(topics_path, 'rb'))[datatype]
FileNotFoundError: [Errno 2] No such file or directory: '/mnt/disks/disk-huge/melody/parlai/data/wizard_of_wikipedia/topic_splits.json'
```
**Additional context**
Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
</issue>
<code>
[start of parlai/tasks/wizard_of_wikipedia/worlds.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from copy import deepcopy
8 import json
9 import random
10 import os
11 import string
12
13
14 from parlai.core.agents import create_agent
15 from parlai.core.message import Message
16 from parlai.core.worlds import DialogPartnerWorld, validate
17 from parlai.tasks.wizard_of_wikipedia.agents import TOKEN_KNOWLEDGE, TOKEN_END_KNOWLEDGE
18 from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
19 from parlai.utils.misc import warn_once
20
21 from projects.wizard_of_wikipedia.knowledge_retriever.knowledge_retriever import (
22 KnowledgeRetrieverAgent,
23 )
24
25
26 NO_TOPIC = '[NO TOPIC]'
27
28
29 class InteractiveWorld(DialogPartnerWorld):
30 """
31 Interactive world for wizard of wikipedia.
32
33 Used for models trained on the task `-t wizard_of_wikipedia`. Automatically
34 retrieves knowledge from Wikipedia based on the conversation history using a TF-IDF
35 retriever. Then uses a Transformer-based model to select a checked sentence from
36 these retrieved passages.
37 """
38
39 @staticmethod
40 def add_cmdline_args(argparser):
41 parser = argparser.add_argument_group('WoW Interactive World Args')
42 parser.add_argument(
43 '--print-checked-sentence',
44 type='bool',
45 default=True,
46 help='Print sentence that the model checks.',
47 )
48
49 def __init__(self, opt, agents, shared=None):
50 super().__init__(opt, agents, shared)
51 print('[ loading topics.. ]')
52 self.opt = opt
53 self._load_topics(opt)
54 self.num_topics = opt['num_topics']
55 self.cnt = 0
56 self.human_agent = self.agents[0]
57 self.model_agent = self.agents[1]
58
59 self._set_up_knowledge_agent(opt.get('add_token_knowledge', False))
60
61 self.print_checked_sentence = opt['print_checked_sentence']
62
63 def _set_up_knowledge_agent(self, add_token_knowledge=False):
64 from parlai.core.params import ParlaiParser
65
66 parser = ParlaiParser(False, False)
67 KnowledgeRetrieverAgent.add_cmdline_args(parser)
68 parser.set_params(
69 model='projects:wizard_of_wikipedia:knowledge_retriever',
70 add_token_knowledge=add_token_knowledge,
71 )
72 knowledge_opt = parser.parse_args([])
73 self.knowledge_agent = create_agent(knowledge_opt)
74
75 def _load_topics(self, opt):
76 # Load possible chosen topics
77 topics_path = os.path.join(
78 opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'
79 )
80 # Get training set topics
81 datatype = opt['datatype'].split(':')[0]
82 self.topic_list = json.load(open(topics_path, 'rb'))[datatype]
83
84 def _get_new_topic(self):
85 random.seed()
86 topics = random.sample(self.topic_list, self.num_topics - 1)
87 topics.append(NO_TOPIC)
88 letters = list(string.ascii_uppercase)[: self.num_topics]
89 topic_list = {x: y for x, y in zip(letters, topics)}
90 topic_text = '\n'.join(['{}: {}'.format(k, v) for k, v in topic_list.items()])
91
92 done = False
93 while not done:
94 self.human_agent.observe(
95 {
96 'text': '\nPlease choose one of the following topics by typing '
97 'A, B, C, ..., etc. : \n\n{}\n'.format(topic_text)
98 }
99 )
100 topic_act = self.human_agent.act()
101 choice = topic_act['text'][0].upper()
102 if choice in topic_list:
103 done = True
104 else:
105 self.human_agent.observe(
106 {'text': 'Invalid response, please try again.'}
107 )
108
109 chosen_topic = topic_list[choice]
110 print('[ Your chosen topic is: {} ]'.format(chosen_topic))
111 return chosen_topic
112
113 def _add_knowledge_to_act(self, act):
114 self.knowledge_agent.observe(act, actor_id='apprentice')
115 knowledge_act = self.knowledge_agent.act()
116 act['knowledge'] = knowledge_act['text']
117 act['checked_sentence'] = knowledge_act['checked_sentence']
118 if self.print_checked_sentence:
119 print(
120 '[ Using chosen sentence from Wikpedia ]: {}'.format(
121 knowledge_act['checked_sentence']
122 )
123 )
124 act['title'] = knowledge_act['title']
125 return act
126
127 def parley(self):
128 """
129 Loop between wizard and apprentice.
130
131 Adds knowledge to the wizard observations. Assumes that the model agent is the
132 wizard model.
133 """
134
135 if self.cnt == 0:
136 self.topic = self._get_new_topic()
137 self.acts = [None, None]
138 self.human_first = random.choice([0, 1])
139
140 # possibly get human act first
141 if self.cnt == 0 and not self.human_first:
142 self.acts[0] = act = Message({'text': '', 'episode_done': False})
143 act = self.acts[0]
144 else:
145 self.acts[0] = self.human_agent.act()
146 act = deepcopy(self.acts[0])
147
148 # model agent observe
149 if self.cnt == 0 and self.topic != NO_TOPIC:
150 # add the chosen_topic to the message
151 act['chosen_topic'] = self.topic
152 act.force_set('text', '\n'.join([self.topic, act.get('text', 'hi')]))
153
154 # add knowledge to the model observation
155 act = self._add_knowledge_to_act(act)
156
157 # model observes knowledge and human (apprentice) act
158 self.model_agent.observe(validate(act))
159
160 # model agent act
161 self.acts[1] = self.model_agent.act()
162
163 # add the model reply to the knowledge retriever's dialogue history
164 self.knowledge_agent.observe(self.acts[1], actor_id='wizard')
165
166 # human (apprentice) agent observes model act
167 self.human_agent.observe(validate(self.acts[1]))
168
169 self.update_counters()
170 self.cnt += 1
171
172 if self.episode_done():
173 print('[ CHAT DONE ]')
174 print('\n[ Preparing new chat... ]\n')
175 self.cnt = 0
176 self.model_agent.reset()
177
178
179 class InteractiveGeneratorWorld(InteractiveWorld):
180 """
181 Interactive world for generative models.
182
183 Specifically a world for models trained on the task `-t wizard_of_wikipedia
184 generator`.
185 """
186
187 def __init__(self, opt, agents, shared=None):
188 super().__init__(opt, agents, shared)
189 print('[ loading topics.. ]')
190 self.opt = opt
191 self._load_topics(opt)
192 self.num_topics = opt['num_topics']
193 self.cnt = 0
194 self.human_agent = self.agents[0]
195 self.model_agent = self.agents[1]
196
197 self._set_up_knowledge_agent(add_token_knowledge=True)
198
199 def _add_knowledge_to_act(self, act):
200 act = super()._add_knowledge_to_act(act)
201 if self.opt.get('prepend_gold_knowledge', False):
202 warn_once(
203 'Prepending selected knowledge to dialogue input.'
204 'If this was not intended behavior, please run with the '
205 'flag --prepend-gold-knowledge False'
206 )
207 knowledge_text = ' '.join(
208 [TOKEN_KNOWLEDGE, act['checked_sentence'], TOKEN_END_KNOWLEDGE]
209 )
210 new_text = '\n'.join([knowledge_text, act['text']])
211 act.force_set('text', new_text)
212 else:
213 warn_once(
214 'Not prepending selected knowledge to dialogue input.'
215 'If this was not intended behavior, please run with the '
216 'flag --prepend-gold-knowledge True'
217 )
218 return act
219
220
221 class SelfChatWorld(SelfChatBaseWorld):
222 def init_contexts(self, shared=None):
223 print('[ loading topics.. ]')
224 # Load possible chosen topics
225 topics_path = os.path.join(
226 self.opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'
227 )
228 # Get training set topics
229 datatype = self.opt['datatype'].split(':')[0]
230 self.topic_list = json.load(open(topics_path, 'rb'))[datatype]
231
232 def get_contexts(self):
233 random.seed()
234 topic = random.choice(self.topic_list)
235 return [topic, topic]
236
[end of parlai/tasks/wizard_of_wikipedia/worlds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/tasks/wizard_of_wikipedia/worlds.py b/parlai/tasks/wizard_of_wikipedia/worlds.py
--- a/parlai/tasks/wizard_of_wikipedia/worlds.py
+++ b/parlai/tasks/wizard_of_wikipedia/worlds.py
@@ -18,6 +18,8 @@
from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
from parlai.utils.misc import warn_once
+from .build import build
+
from projects.wizard_of_wikipedia.knowledge_retriever.knowledge_retriever import (
KnowledgeRetrieverAgent,
)
@@ -74,6 +76,7 @@
def _load_topics(self, opt):
# Load possible chosen topics
+ build(opt)
topics_path = os.path.join(
opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'
)
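
The change above is a lazy-download guard: `build(opt)`, imported in the patch as `from .build import build`, downloads and unpacks the Wizard of Wikipedia data before `topic_splits.json` is opened, so interactive mode no longer assumes the file already exists. Below is a hedged, standalone sketch of the same idea; `build` is taken as a parameter only so the snippet runs on its own, while in ParlAI it is the task's own build routine:

```python
import json
import os

def load_topics(opt, build):
    build(opt)  # make sure the dataset exists under opt['datapath'] before reading
    topics_path = os.path.join(
        opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'
    )
    datatype = opt['datatype'].split(':')[0]
    with open(topics_path, 'rb') as f:
        return json.load(f)[datatype]
```
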
| {"golden_diff": "diff --git a/parlai/tasks/wizard_of_wikipedia/worlds.py b/parlai/tasks/wizard_of_wikipedia/worlds.py\n--- a/parlai/tasks/wizard_of_wikipedia/worlds.py\n+++ b/parlai/tasks/wizard_of_wikipedia/worlds.py\n@@ -18,6 +18,8 @@\n from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld\n from parlai.utils.misc import warn_once\n \n+from .build import build\n+\n from projects.wizard_of_wikipedia.knowledge_retriever.knowledge_retriever import (\n KnowledgeRetrieverAgent,\n )\n@@ -74,6 +76,7 @@\n \n def _load_topics(self, opt):\n # Load possible chosen topics\n+ build(opt)\n topics_path = os.path.join(\n opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'\n )\n", "issue": "FileNotFoundError when running WoW in interactive\n**Bug description**\r\nI am running the following command: `parlai interactive -mf models:wizard_of_wikipedia/end2end_generator/model -t wizard_of_wikipedia`. At the step when \"loading topics\" there is a missing file error: `FileNotFoundError: [Errno 2] No such file or directory: '/mnt/disks/disk-huge/melody/parlai/data/wizard_of_wikipedia/topic_splits.json'`\r\n\r\n**Reproduction steps**\r\nRun `parlai interactive -mf models:wizard_of_wikipedia/end2end_generator/model -t wizard_of_wikipedia`\r\n\r\n**Expected behavior**\r\nThe file is automatically downloaded, topics are loaded and the interactive session begins.\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\n14:49:06 | creating task(s): wizard_of_wikipedia\r\n[ loading topics.. ]\r\nTraceback (most recent call last):\r\n File \"/home/melody/anaconda3/envs/parlai/bin/parlai\", line 33, in <module>\r\n sys.exit(load_entry_point('parlai', 'console_scripts', 'parlai')())\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/core/script.py\", line 272, in superscript_main\r\n SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/core/script.py\", line 88, in _run_from_parser_and_opt\r\n return script.run()\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/scripts/interactive.py\", line 117, in run\r\n return interactive(self.opt)\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/scripts/interactive.py\", line 88, in interactive\r\n world = create_task(opt, [human_agent, agent])\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/core/worlds.py\", line 1262, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/core/worlds.py\", line 1235, in create_task_world\r\n return world_class(opt, task_agents + user_agents)\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/tasks/wizard_of_wikipedia/worlds.py\", line 53, in __init__\r\n self._load_topics(opt)\r\n File \"/mnt/disks/disk-huge/melody/parlai/parlai/tasks/wizard_of_wikipedia/worlds.py\", line 82, in _load_topics\r\n self.topic_list = json.load(open(topics_path, 'rb'))[datatype]\r\nFileNotFoundError: [Errno 2] No such file or directory: '/mnt/disks/disk-huge/melody/parlai/data/wizard_of_wikipedia/topic_splits.json'\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom copy import deepcopy\nimport json\nimport random\nimport os\nimport string\n\n\nfrom parlai.core.agents import create_agent\nfrom parlai.core.message import Message\nfrom parlai.core.worlds import DialogPartnerWorld, validate\nfrom parlai.tasks.wizard_of_wikipedia.agents import TOKEN_KNOWLEDGE, TOKEN_END_KNOWLEDGE\nfrom parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld\nfrom parlai.utils.misc import warn_once\n\nfrom projects.wizard_of_wikipedia.knowledge_retriever.knowledge_retriever import (\n KnowledgeRetrieverAgent,\n)\n\n\nNO_TOPIC = '[NO TOPIC]'\n\n\nclass InteractiveWorld(DialogPartnerWorld):\n \"\"\"\n Interactive world for wizard of wikipedia.\n\n Used for models trained on the task `-t wizard_of_wikipedia`. Automatically\n retrieves knowledge from Wikipedia based on the conversation history using a TF-IDF\n retriever. Then uses a Transformer-based model to select a checked sentence from\n these retrieved passages.\n \"\"\"\n\n @staticmethod\n def add_cmdline_args(argparser):\n parser = argparser.add_argument_group('WoW Interactive World Args')\n parser.add_argument(\n '--print-checked-sentence',\n type='bool',\n default=True,\n help='Print sentence that the model checks.',\n )\n\n def __init__(self, opt, agents, shared=None):\n super().__init__(opt, agents, shared)\n print('[ loading topics.. ]')\n self.opt = opt\n self._load_topics(opt)\n self.num_topics = opt['num_topics']\n self.cnt = 0\n self.human_agent = self.agents[0]\n self.model_agent = self.agents[1]\n\n self._set_up_knowledge_agent(opt.get('add_token_knowledge', False))\n\n self.print_checked_sentence = opt['print_checked_sentence']\n\n def _set_up_knowledge_agent(self, add_token_knowledge=False):\n from parlai.core.params import ParlaiParser\n\n parser = ParlaiParser(False, False)\n KnowledgeRetrieverAgent.add_cmdline_args(parser)\n parser.set_params(\n model='projects:wizard_of_wikipedia:knowledge_retriever',\n add_token_knowledge=add_token_knowledge,\n )\n knowledge_opt = parser.parse_args([])\n self.knowledge_agent = create_agent(knowledge_opt)\n\n def _load_topics(self, opt):\n # Load possible chosen topics\n topics_path = os.path.join(\n opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'\n )\n # Get training set topics\n datatype = opt['datatype'].split(':')[0]\n self.topic_list = json.load(open(topics_path, 'rb'))[datatype]\n\n def _get_new_topic(self):\n random.seed()\n topics = random.sample(self.topic_list, self.num_topics - 1)\n topics.append(NO_TOPIC)\n letters = list(string.ascii_uppercase)[: self.num_topics]\n topic_list = {x: y for x, y in zip(letters, topics)}\n topic_text = '\\n'.join(['{}: {}'.format(k, v) for k, v in topic_list.items()])\n\n done = False\n while not done:\n self.human_agent.observe(\n {\n 'text': '\\nPlease choose one of the following topics by typing '\n 'A, B, C, ..., etc. 
: \\n\\n{}\\n'.format(topic_text)\n }\n )\n topic_act = self.human_agent.act()\n choice = topic_act['text'][0].upper()\n if choice in topic_list:\n done = True\n else:\n self.human_agent.observe(\n {'text': 'Invalid response, please try again.'}\n )\n\n chosen_topic = topic_list[choice]\n print('[ Your chosen topic is: {} ]'.format(chosen_topic))\n return chosen_topic\n\n def _add_knowledge_to_act(self, act):\n self.knowledge_agent.observe(act, actor_id='apprentice')\n knowledge_act = self.knowledge_agent.act()\n act['knowledge'] = knowledge_act['text']\n act['checked_sentence'] = knowledge_act['checked_sentence']\n if self.print_checked_sentence:\n print(\n '[ Using chosen sentence from Wikpedia ]: {}'.format(\n knowledge_act['checked_sentence']\n )\n )\n act['title'] = knowledge_act['title']\n return act\n\n def parley(self):\n \"\"\"\n Loop between wizard and apprentice.\n\n Adds knowledge to the wizard observations. Assumes that the model agent is the\n wizard model.\n \"\"\"\n\n if self.cnt == 0:\n self.topic = self._get_new_topic()\n self.acts = [None, None]\n self.human_first = random.choice([0, 1])\n\n # possibly get human act first\n if self.cnt == 0 and not self.human_first:\n self.acts[0] = act = Message({'text': '', 'episode_done': False})\n act = self.acts[0]\n else:\n self.acts[0] = self.human_agent.act()\n act = deepcopy(self.acts[0])\n\n # model agent observe\n if self.cnt == 0 and self.topic != NO_TOPIC:\n # add the chosen_topic to the message\n act['chosen_topic'] = self.topic\n act.force_set('text', '\\n'.join([self.topic, act.get('text', 'hi')]))\n\n # add knowledge to the model observation\n act = self._add_knowledge_to_act(act)\n\n # model observes knowledge and human (apprentice) act\n self.model_agent.observe(validate(act))\n\n # model agent act\n self.acts[1] = self.model_agent.act()\n\n # add the model reply to the knowledge retriever's dialogue history\n self.knowledge_agent.observe(self.acts[1], actor_id='wizard')\n\n # human (apprentice) agent observes model act\n self.human_agent.observe(validate(self.acts[1]))\n\n self.update_counters()\n self.cnt += 1\n\n if self.episode_done():\n print('[ CHAT DONE ]')\n print('\\n[ Preparing new chat... ]\\n')\n self.cnt = 0\n self.model_agent.reset()\n\n\nclass InteractiveGeneratorWorld(InteractiveWorld):\n \"\"\"\n Interactive world for generative models.\n\n Specifically a world for models trained on the task `-t wizard_of_wikipedia\n generator`.\n \"\"\"\n\n def __init__(self, opt, agents, shared=None):\n super().__init__(opt, agents, shared)\n print('[ loading topics.. 
]')\n self.opt = opt\n self._load_topics(opt)\n self.num_topics = opt['num_topics']\n self.cnt = 0\n self.human_agent = self.agents[0]\n self.model_agent = self.agents[1]\n\n self._set_up_knowledge_agent(add_token_knowledge=True)\n\n def _add_knowledge_to_act(self, act):\n act = super()._add_knowledge_to_act(act)\n if self.opt.get('prepend_gold_knowledge', False):\n warn_once(\n 'Prepending selected knowledge to dialogue input.'\n 'If this was not intended behavior, please run with the '\n 'flag --prepend-gold-knowledge False'\n )\n knowledge_text = ' '.join(\n [TOKEN_KNOWLEDGE, act['checked_sentence'], TOKEN_END_KNOWLEDGE]\n )\n new_text = '\\n'.join([knowledge_text, act['text']])\n act.force_set('text', new_text)\n else:\n warn_once(\n 'Not prepending selected knowledge to dialogue input.'\n 'If this was not intended behavior, please run with the '\n 'flag --prepend-gold-knowledge True'\n )\n return act\n\n\nclass SelfChatWorld(SelfChatBaseWorld):\n def init_contexts(self, shared=None):\n print('[ loading topics.. ]')\n # Load possible chosen topics\n topics_path = os.path.join(\n self.opt['datapath'], 'wizard_of_wikipedia', 'topic_splits.json'\n )\n # Get training set topics\n datatype = self.opt['datatype'].split(':')[0]\n self.topic_list = json.load(open(topics_path, 'rb'))[datatype]\n\n def get_contexts(self):\n random.seed()\n topic = random.choice(self.topic_list)\n return [topic, topic]\n", "path": "parlai/tasks/wizard_of_wikipedia/worlds.py"}]} | 3,714 | 199 |
gh_patches_debug_11760 | rasdani/github-patches | git_diff | pypa__pip-6774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'NoneType' object has no attribute 'is_hash_allowed'
**Environment**
* pip version: 19.2
* Python version: 3.6.8
* OS: Mac OSX (Darwin Kernel Version 18.6.0)
**Description**
I updated my project's environment, including pip. After that I wanted to check for outdated packages with the command:
```python
pip list --outdated --format=columns
```
After that exception was raised.
**Expected behavior**
I expected a list of packages or an empty list.
**How to Reproduce**
1. Get the newest version of package from PyPI.
2. Then run `pip list --outdated --format=columns`
3. An error occurs.
**Output**
```python
(env) project (develop) $ pip list --outdated --format=columns
ERROR: Exception:
Traceback (most recent call last):
File "/project/env/lib/python3.6/site-packages/pip/_internal/cli/base_command.py", line 188, in main
status = self.run(options, args)
File "/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py", line 156, in run
packages = self.get_outdated(packages, options)
File "/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py", line 164, in get_outdated
dist for dist in self.iter_packages_latest_infos(packages, options)
File "/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py", line 164, in <listcomp>
dist for dist in self.iter_packages_latest_infos(packages, options)
File "/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py", line 195, in iter_packages_latest_infos
best_candidate = evaluator.get_best_candidate(all_candidates)
File "/project/env/lib/python3.6/site-packages/pip/_internal/index.py", line 729, in get_best_candidate
best_candidate = max(candidates, key=self._sort_key)
File "/project/env/lib/python3.6/site-packages/pip/_internal/index.py", line 710, in _sort_key
has_allowed_hash = int(link.is_hash_allowed(self._hashes))
File "/project/env/lib/python3.6/site-packages/pip/_internal/models/link.py", line 213, in is_hash_allowed
return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
AttributeError: 'NoneType' object has no attribute 'is_hash_allowed'
```
</issue>
<code>
[start of src/pip/_internal/models/link.py]
1 import posixpath
2 import re
3
4 from pip._vendor.six.moves.urllib import parse as urllib_parse
5
6 from pip._internal.utils.misc import (
7 WHEEL_EXTENSION, path_to_url, redact_password_from_url,
8 split_auth_from_netloc, splitext,
9 )
10 from pip._internal.utils.models import KeyBasedCompareMixin
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from typing import Optional, Text, Tuple, Union
15 from pip._internal.index import HTMLPage
16 from pip._internal.utils.hashes import Hashes
17
18
19 class Link(KeyBasedCompareMixin):
20 """Represents a parsed link from a Package Index's simple URL
21 """
22
23 def __init__(
24 self,
25 url, # type: str
26 comes_from=None, # type: Optional[Union[str, HTMLPage]]
27 requires_python=None, # type: Optional[str]
28 yanked_reason=None, # type: Optional[Text]
29 ):
30 # type: (...) -> None
31 """
32 :param url: url of the resource pointed to (href of the link)
33 :param comes_from: instance of HTMLPage where the link was found,
34 or string.
35 :param requires_python: String containing the `Requires-Python`
36 metadata field, specified in PEP 345. This may be specified by
37 a data-requires-python attribute in the HTML link tag, as
38 described in PEP 503.
39 :param yanked_reason: the reason the file has been yanked, if the
40 file has been yanked, or None if the file hasn't been yanked.
41 This is the value of the "data-yanked" attribute, if present, in
42 a simple repository HTML link. If the file has been yanked but
43 no reason was provided, this should be the empty string. See
44 PEP 592 for more information and the specification.
45 """
46
47 # url can be a UNC windows share
48 if url.startswith('\\\\'):
49 url = path_to_url(url)
50
51 self._parsed_url = urllib_parse.urlsplit(url)
52 # Store the url as a private attribute to prevent accidentally
53 # trying to set a new value.
54 self._url = url
55
56 self.comes_from = comes_from
57 self.requires_python = requires_python if requires_python else None
58 self.yanked_reason = yanked_reason
59
60 super(Link, self).__init__(key=url, defining_class=Link)
61
62 def __str__(self):
63 if self.requires_python:
64 rp = ' (requires-python:%s)' % self.requires_python
65 else:
66 rp = ''
67 if self.comes_from:
68 return '%s (from %s)%s' % (redact_password_from_url(self._url),
69 self.comes_from, rp)
70 else:
71 return redact_password_from_url(str(self._url))
72
73 def __repr__(self):
74 return '<Link %s>' % self
75
76 @property
77 def url(self):
78 # type: () -> str
79 return self._url
80
81 @property
82 def filename(self):
83 # type: () -> str
84 path = self.path.rstrip('/')
85 name = posixpath.basename(path)
86 if not name:
87 # Make sure we don't leak auth information if the netloc
88 # includes a username and password.
89 netloc, user_pass = split_auth_from_netloc(self.netloc)
90 return netloc
91
92 name = urllib_parse.unquote(name)
93 assert name, ('URL %r produced no filename' % self._url)
94 return name
95
96 @property
97 def scheme(self):
98 # type: () -> str
99 return self._parsed_url.scheme
100
101 @property
102 def netloc(self):
103 # type: () -> str
104 """
105 This can contain auth information.
106 """
107 return self._parsed_url.netloc
108
109 @property
110 def path(self):
111 # type: () -> str
112 return urllib_parse.unquote(self._parsed_url.path)
113
114 def splitext(self):
115 # type: () -> Tuple[str, str]
116 return splitext(posixpath.basename(self.path.rstrip('/')))
117
118 @property
119 def ext(self):
120 # type: () -> str
121 return self.splitext()[1]
122
123 @property
124 def url_without_fragment(self):
125 # type: () -> str
126 scheme, netloc, path, query, fragment = self._parsed_url
127 return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
128
129 _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
130
131 @property
132 def egg_fragment(self):
133 # type: () -> Optional[str]
134 match = self._egg_fragment_re.search(self._url)
135 if not match:
136 return None
137 return match.group(1)
138
139 _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
140
141 @property
142 def subdirectory_fragment(self):
143 # type: () -> Optional[str]
144 match = self._subdirectory_fragment_re.search(self._url)
145 if not match:
146 return None
147 return match.group(1)
148
149 _hash_re = re.compile(
150 r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
151 )
152
153 @property
154 def hash(self):
155 # type: () -> Optional[str]
156 match = self._hash_re.search(self._url)
157 if match:
158 return match.group(2)
159 return None
160
161 @property
162 def hash_name(self):
163 # type: () -> Optional[str]
164 match = self._hash_re.search(self._url)
165 if match:
166 return match.group(1)
167 return None
168
169 @property
170 def show_url(self):
171 # type: () -> Optional[str]
172 return posixpath.basename(self._url.split('#', 1)[0].split('?', 1)[0])
173
174 @property
175 def is_wheel(self):
176 # type: () -> bool
177 return self.ext == WHEEL_EXTENSION
178
179 @property
180 def is_artifact(self):
181 # type: () -> bool
182 """
183 Determines if this points to an actual artifact (e.g. a tarball) or if
184 it points to an "abstract" thing like a path or a VCS location.
185 """
186 from pip._internal.vcs import vcs
187
188 if self.scheme in vcs.all_schemes:
189 return False
190
191 return True
192
193 @property
194 def is_yanked(self):
195 # type: () -> bool
196 return self.yanked_reason is not None
197
198 @property
199 def has_hash(self):
200 return self.hash_name is not None
201
202 def is_hash_allowed(self, hashes):
203 # type: (Hashes) -> bool
204 """
205 Return True if the link has a hash and it is allowed.
206 """
207 if not self.has_hash:
208 return False
209 # Assert non-None so mypy knows self.hash_name and self.hash are str.
210 assert self.hash_name is not None
211 assert self.hash is not None
212
213 return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)
214
[end of src/pip/_internal/models/link.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/models/link.py b/src/pip/_internal/models/link.py
--- a/src/pip/_internal/models/link.py
+++ b/src/pip/_internal/models/link.py
@@ -200,11 +200,11 @@
return self.hash_name is not None
def is_hash_allowed(self, hashes):
- # type: (Hashes) -> bool
+ # type: (Optional[Hashes]) -> bool
"""
Return True if the link has a hash and it is allowed.
"""
- if not self.has_hash:
+ if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
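
This one-line behavioral change is what unbreaks `pip list --outdated`: the candidate evaluator calls `link.is_hash_allowed(self._hashes)` with `self._hashes` set to `None` when no hash requirements apply, so the method now accepts `Optional[Hashes]` and returns `False` early instead of calling a method on `None`. A small illustration of the patched behavior (assumes a pip version with this fix is importable; the URL and digest are made up):

```python
from pip._internal.models.link import Link

link = Link('https://example.com/pkg-1.0.tar.gz#sha256=' + '0' * 64)
print(link.is_hash_allowed(None))  # False with the patch; AttributeError before it
```
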
| {"golden_diff": "diff --git a/src/pip/_internal/models/link.py b/src/pip/_internal/models/link.py\n--- a/src/pip/_internal/models/link.py\n+++ b/src/pip/_internal/models/link.py\n@@ -200,11 +200,11 @@\n return self.hash_name is not None\n \n def is_hash_allowed(self, hashes):\n- # type: (Hashes) -> bool\n+ # type: (Optional[Hashes]) -> bool\n \"\"\"\n Return True if the link has a hash and it is allowed.\n \"\"\"\n- if not self.has_hash:\n+ if hashes is None or not self.has_hash:\n return False\n # Assert non-None so mypy knows self.hash_name and self.hash are str.\n assert self.hash_name is not None\n", "issue": "AttributeError: 'NoneType' object has no attribute 'is_hash_allowed'\n**Environment**\r\n\r\n* pip version: 19.2\r\n* Python version: 3.6.8\r\n* OS: Mac OSX (Darwin Kernel Version 18.6.0)\r\n\r\n**Description**\r\nI made env update in my project including pip as well. After that I wanted to check outdated packages with command:\r\n\r\n```python\r\npip list --outdated --format=columns\r\n```\r\n\r\nAfter that exception was raised.\r\n\r\n**Expected behavior**\r\nI expected list of packages or empty list.\r\n\r\n**How to Reproduce**\r\n1. Get the newest version of package from PyPI.\r\n2. Then run `pip list --outdated --format=columns`\r\n3. An error occurs.\r\n\r\n**Output**\r\n```python\r\n(env) project (develop) $ pip list --outdated --format=columns\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/cli/base_command.py\", line 188, in main\r\n status = self.run(options, args)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py\", line 156, in run\r\n packages = self.get_outdated(packages, options)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py\", line 164, in get_outdated\r\n dist for dist in self.iter_packages_latest_infos(packages, options)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py\", line 164, in <listcomp>\r\n dist for dist in self.iter_packages_latest_infos(packages, options)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/commands/list.py\", line 195, in iter_packages_latest_infos\r\n best_candidate = evaluator.get_best_candidate(all_candidates)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/index.py\", line 729, in get_best_candidate\r\n best_candidate = max(candidates, key=self._sort_key)\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/index.py\", line 710, in _sort_key\r\n has_allowed_hash = int(link.is_hash_allowed(self._hashes))\r\n File \"/project/env/lib/python3.6/site-packages/pip/_internal/models/link.py\", line 213, in is_hash_allowed\r\n return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)\r\nAttributeError: 'NoneType' object has no attribute 'is_hash_allowed'\r\n```\n", "before_files": [{"content": "import posixpath\nimport re\n\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\n\nfrom pip._internal.utils.misc import (\n WHEEL_EXTENSION, path_to_url, redact_password_from_url,\n split_auth_from_netloc, splitext,\n)\nfrom pip._internal.utils.models import KeyBasedCompareMixin\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from typing import Optional, Text, Tuple, Union\n from pip._internal.index import HTMLPage\n from pip._internal.utils.hashes import Hashes\n\n\nclass Link(KeyBasedCompareMixin):\n \"\"\"Represents a parsed link from a Package Index's simple 
URL\n \"\"\"\n\n def __init__(\n self,\n url, # type: str\n comes_from=None, # type: Optional[Union[str, HTMLPage]]\n requires_python=None, # type: Optional[str]\n yanked_reason=None, # type: Optional[Text]\n ):\n # type: (...) -> None\n \"\"\"\n :param url: url of the resource pointed to (href of the link)\n :param comes_from: instance of HTMLPage where the link was found,\n or string.\n :param requires_python: String containing the `Requires-Python`\n metadata field, specified in PEP 345. This may be specified by\n a data-requires-python attribute in the HTML link tag, as\n described in PEP 503.\n :param yanked_reason: the reason the file has been yanked, if the\n file has been yanked, or None if the file hasn't been yanked.\n This is the value of the \"data-yanked\" attribute, if present, in\n a simple repository HTML link. If the file has been yanked but\n no reason was provided, this should be the empty string. See\n PEP 592 for more information and the specification.\n \"\"\"\n\n # url can be a UNC windows share\n if url.startswith('\\\\\\\\'):\n url = path_to_url(url)\n\n self._parsed_url = urllib_parse.urlsplit(url)\n # Store the url as a private attribute to prevent accidentally\n # trying to set a new value.\n self._url = url\n\n self.comes_from = comes_from\n self.requires_python = requires_python if requires_python else None\n self.yanked_reason = yanked_reason\n\n super(Link, self).__init__(key=url, defining_class=Link)\n\n def __str__(self):\n if self.requires_python:\n rp = ' (requires-python:%s)' % self.requires_python\n else:\n rp = ''\n if self.comes_from:\n return '%s (from %s)%s' % (redact_password_from_url(self._url),\n self.comes_from, rp)\n else:\n return redact_password_from_url(str(self._url))\n\n def __repr__(self):\n return '<Link %s>' % self\n\n @property\n def url(self):\n # type: () -> str\n return self._url\n\n @property\n def filename(self):\n # type: () -> str\n path = self.path.rstrip('/')\n name = posixpath.basename(path)\n if not name:\n # Make sure we don't leak auth information if the netloc\n # includes a username and password.\n netloc, user_pass = split_auth_from_netloc(self.netloc)\n return netloc\n\n name = urllib_parse.unquote(name)\n assert name, ('URL %r produced no filename' % self._url)\n return name\n\n @property\n def scheme(self):\n # type: () -> str\n return self._parsed_url.scheme\n\n @property\n def netloc(self):\n # type: () -> str\n \"\"\"\n This can contain auth information.\n \"\"\"\n return self._parsed_url.netloc\n\n @property\n def path(self):\n # type: () -> str\n return urllib_parse.unquote(self._parsed_url.path)\n\n def splitext(self):\n # type: () -> Tuple[str, str]\n return splitext(posixpath.basename(self.path.rstrip('/')))\n\n @property\n def ext(self):\n # type: () -> str\n return self.splitext()[1]\n\n @property\n def url_without_fragment(self):\n # type: () -> str\n scheme, netloc, path, query, fragment = self._parsed_url\n return urllib_parse.urlunsplit((scheme, netloc, path, query, None))\n\n _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')\n\n @property\n def egg_fragment(self):\n # type: () -> Optional[str]\n match = self._egg_fragment_re.search(self._url)\n if not match:\n return None\n return match.group(1)\n\n _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')\n\n @property\n def subdirectory_fragment(self):\n # type: () -> Optional[str]\n match = self._subdirectory_fragment_re.search(self._url)\n if not match:\n return None\n return match.group(1)\n\n _hash_re = re.compile(\n 
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'\n )\n\n @property\n def hash(self):\n # type: () -> Optional[str]\n match = self._hash_re.search(self._url)\n if match:\n return match.group(2)\n return None\n\n @property\n def hash_name(self):\n # type: () -> Optional[str]\n match = self._hash_re.search(self._url)\n if match:\n return match.group(1)\n return None\n\n @property\n def show_url(self):\n # type: () -> Optional[str]\n return posixpath.basename(self._url.split('#', 1)[0].split('?', 1)[0])\n\n @property\n def is_wheel(self):\n # type: () -> bool\n return self.ext == WHEEL_EXTENSION\n\n @property\n def is_artifact(self):\n # type: () -> bool\n \"\"\"\n Determines if this points to an actual artifact (e.g. a tarball) or if\n it points to an \"abstract\" thing like a path or a VCS location.\n \"\"\"\n from pip._internal.vcs import vcs\n\n if self.scheme in vcs.all_schemes:\n return False\n\n return True\n\n @property\n def is_yanked(self):\n # type: () -> bool\n return self.yanked_reason is not None\n\n @property\n def has_hash(self):\n return self.hash_name is not None\n\n def is_hash_allowed(self, hashes):\n # type: (Hashes) -> bool\n \"\"\"\n Return True if the link has a hash and it is allowed.\n \"\"\"\n if not self.has_hash:\n return False\n # Assert non-None so mypy knows self.hash_name and self.hash are str.\n assert self.hash_name is not None\n assert self.hash is not None\n\n return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)\n", "path": "src/pip/_internal/models/link.py"}]} | 3,232 | 174 |
gh_patches_debug_23694 | rasdani/github-patches | git_diff | pre-commit__pre-commit-718 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle when `core.hooksPath` is set?
As we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `core.hooksPath` is set.
A few options:
- during `pre-commit install`, check this variable and warn
- the same check, but error out instead of just warning
- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it)
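
A rough sketch of the first option (detect the setting and warn), using only the standard library. The helper below is illustrative rather than existing pre-commit code; it relies on `git config <key>` exiting non-zero when the key is unset:

```
# Illustrative only: not part of pre-commit itself.
import subprocess


def hooks_path_is_set():
    try:
        out = subprocess.check_output(('git', 'config', 'core.hooksPath'))
    except subprocess.CalledProcessError:
        return False  # exit status 1: the key is not set
    return bool(out.strip())


if hooks_path_is_set():
    print(
        'WARNING: `core.hooksPath` is set, so hooks installed into '
        '.git/hooks will not run. Unset it with '
        '`git config --unset-all core.hooksPath`.'
    )
```

The same check could raise an exception or return a non-zero status instead of printing, which would implement the error variant.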
</issue>
<code>
[start of pre_commit/commands/install_uninstall.py]
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import os.path
6 import sys
7
8 from pre_commit import output
9 from pre_commit.util import make_executable
10 from pre_commit.util import mkdirp
11 from pre_commit.util import resource_filename
12
13
14 # This is used to identify the hook file we install
15 PRIOR_HASHES = (
16 '4d9958c90bc262f47553e2c073f14cfe',
17 'd8ee923c46731b42cd95cc869add4062',
18 '49fd668cb42069aa1b6048464be5d395',
19 '79f09a650522a87b0da915d0d983b2de',
20 'e358c9dae00eac5d06b38dfdb1e33a8c',
21 )
22 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
23 TEMPLATE_START = '# start templated\n'
24 TEMPLATE_END = '# end templated\n'
25
26
27 def is_our_script(filename):
28 if not os.path.exists(filename):
29 return False
30 contents = io.open(filename).read()
31 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
32
33
34 def install(
35 runner, overwrite=False, hooks=False, hook_type='pre-commit',
36 skip_on_missing_conf=False,
37 ):
38 """Install the pre-commit hooks."""
39 hook_path = runner.get_hook_path(hook_type)
40 legacy_path = hook_path + '.legacy'
41
42 mkdirp(os.path.dirname(hook_path))
43
44 # If we have an existing hook, move it to pre-commit.legacy
45 if os.path.lexists(hook_path) and not is_our_script(hook_path):
46 os.rename(hook_path, legacy_path)
47
48 # If we specify overwrite, we simply delete the legacy file
49 if overwrite and os.path.exists(legacy_path):
50 os.remove(legacy_path)
51 elif os.path.exists(legacy_path):
52 output.write_line(
53 'Running in migration mode with existing hooks at {}\n'
54 'Use -f to use only pre-commit.'.format(legacy_path),
55 )
56
57 params = {
58 'CONFIG': runner.config_file,
59 'HOOK_TYPE': hook_type,
60 'INSTALL_PYTHON': sys.executable,
61 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,
62 }
63
64 with io.open(hook_path, 'w') as hook_file:
65 with io.open(resource_filename('hook-tmpl')) as f:
66 contents = f.read()
67 before, rest = contents.split(TEMPLATE_START)
68 to_template, after = rest.split(TEMPLATE_END)
69
70 hook_file.write(before + TEMPLATE_START)
71 for line in to_template.splitlines():
72 var = line.split()[0]
73 hook_file.write('{} = {!r}\n'.format(var, params[var]))
74 hook_file.write(TEMPLATE_END + after)
75 make_executable(hook_path)
76
77 output.write_line('pre-commit installed at {}'.format(hook_path))
78
79 # If they requested we install all of the hooks, do so.
80 if hooks:
81 install_hooks(runner)
82
83 return 0
84
85
86 def install_hooks(runner):
87 for repository in runner.repositories:
88 repository.require_installed()
89
90
91 def uninstall(runner, hook_type='pre-commit'):
92 """Uninstall the pre-commit hooks."""
93 hook_path = runner.get_hook_path(hook_type)
94 legacy_path = hook_path + '.legacy'
95 # If our file doesn't exist or it isn't ours, gtfo.
96 if not os.path.exists(hook_path) or not is_our_script(hook_path):
97 return 0
98
99 os.remove(hook_path)
100 output.write_line('{} uninstalled'.format(hook_type))
101
102 if os.path.exists(legacy_path):
103 os.rename(legacy_path, hook_path)
104 output.write_line('Restored previous hooks to {}'.format(hook_path))
105
106 return 0
107
[end of pre_commit/commands/install_uninstall.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -2,15 +2,19 @@
from __future__ import unicode_literals
import io
+import logging
import os.path
import sys
from pre_commit import output
+from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
+logger = logging.getLogger(__name__)
+
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
@@ -36,6 +40,13 @@
skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
+ logger.error(
+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
+ 'hint: `git config --unset-all core.hooksPath`',
+ )
+ return 1
+
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
| {"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -2,15 +2,19 @@\n from __future__ import unicode_literals\n \n import io\n+import logging\n import os.path\n import sys\n \n from pre_commit import output\n+from pre_commit.util import cmd_output\n from pre_commit.util import make_executable\n from pre_commit.util import mkdirp\n from pre_commit.util import resource_filename\n \n \n+logger = logging.getLogger(__name__)\n+\n # This is used to identify the hook file we install\n PRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n@@ -36,6 +40,13 @@\n skip_on_missing_conf=False,\n ):\n \"\"\"Install the pre-commit hooks.\"\"\"\n+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n+ logger.error(\n+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n+ 'hint: `git config --unset-all core.hooksPath`',\n+ )\n+ return 1\n+\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n", "issue": "Handle when `core.hooksPath` is set?\nAs we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `core.hooksPath` is set.\r\n\r\nA few options:\r\n- during `pre-commit install`, check this variable and warn\r\n- \"\" but error\r\n- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it)\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport os.path\nimport sys\n\nfrom pre_commit import output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_filename\n\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename):\n return False\n contents = io.open(filename).read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef install(\n runner, overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': runner.config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n with io.open(resource_filename('hook-tmpl')) as f:\n contents = f.read()\n before, rest = 
contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(runner)\n\n return 0\n\n\ndef install_hooks(runner):\n for repository in runner.repositories:\n repository.require_installed()\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]} | 1,774 | 308 |
gh_patches_debug_10086 | rasdani/github-patches | git_diff | pretix__pretix-241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Substring search when matching voucher tags
I might use a tagging structure such as "DE-Berlin", "DE-Stuttgart" for vouchers, and it'd be neat to match all "DE-" tagged vouchers at once.
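
For reference, the difference between exact and substring matching on the tag field in the Django ORM looks roughly like the sketch below; the standalone queryset calls and the `DE-` prefix are purely illustrative:

```
# Illustration only: not existing pretix code.
from pretix.base.models import Voucher

prefix = "DE-"

# Exact match: only vouchers tagged exactly "DE-"
exact = Voucher.objects.filter(tag=prefix)

# Case-insensitive substring match: "DE-Berlin", "DE-Stuttgart", ...
substring = Voucher.objects.filter(tag__icontains=prefix)

# A stricter alternative would be a prefix-only match
prefix_only = Voucher.objects.filter(tag__istartswith=prefix)
```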
</issue>
<code>
[start of src/pretix/control/views/vouchers.py]
1 import csv
2 import io
3
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.urlresolvers import resolve, reverse
7 from django.db import transaction
8 from django.db.models import Count, Q, Sum
9 from django.http import (
10 Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,
11 JsonResponse,
12 )
13 from django.utils.timezone import now
14 from django.utils.translation import ugettext_lazy as _
15 from django.views.generic import (
16 CreateView, DeleteView, ListView, TemplateView, UpdateView, View,
17 )
18
19 from pretix.base.models import Voucher
20 from pretix.base.models.vouchers import _generate_random_code
21 from pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm
22 from pretix.control.permissions import EventPermissionRequiredMixin
23 from pretix.control.signals import voucher_form_class
24
25
26 class VoucherList(EventPermissionRequiredMixin, ListView):
27 model = Voucher
28 context_object_name = 'vouchers'
29 paginate_by = 30
30 template_name = 'pretixcontrol/vouchers/index.html'
31 permission = 'can_change_vouchers'
32
33 def get_queryset(self):
34 qs = self.request.event.vouchers.all().select_related('item', 'variation')
35 if self.request.GET.get("search", "") != "":
36 s = self.request.GET.get("search", "")
37 qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))
38 if self.request.GET.get("tag", "") != "":
39 s = self.request.GET.get("tag", "")
40 qs = qs.filter(tag=s)
41 if self.request.GET.get("status", "") != "":
42 s = self.request.GET.get("status", "")
43 if s == 'v':
44 qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=False)
45 elif s == 'r':
46 qs = qs.filter(redeemed=True)
47 elif s == 'e':
48 qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=False)
49 return qs
50
51 def get(self, request, *args, **kwargs):
52 if request.GET.get("download", "") == "yes":
53 return self._download_csv()
54 return super().get(request, *args, **kwargs)
55
56 def _download_csv(self):
57 output = io.StringIO()
58 writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
59
60 headers = [
61 _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),
62 _('Price'), _('Tag'), _('Redeemed')
63 ]
64 writer.writerow(headers)
65
66 for v in self.get_queryset():
67 if v.item:
68 if v.variation:
69 prod = '%s – %s' % (str(v.item.name), str(v.variation.name))
70 else:
71 prod = '%s' % str(v.item.name)
72 elif v.quota:
73 prod = _('Any product in quota "{quota}"').format(quota=str(v.quota.name))
74 row = [
75 v.code,
76 v.valid_until.isoformat() if v.valid_until else "",
77 prod,
78 _("Yes") if v.block_quota else _("No"),
79 _("Yes") if v.allow_ignore_quota else _("No"),
80 str(v.price) if v.price else "",
81 v.tag,
82 _("Yes") if v.redeemed else _("No"),
83 ]
84 writer.writerow(row)
85
86 r = HttpResponse(output.getvalue().encode("utf-8"), content_type='text/csv')
87 r['Content-Disposition'] = 'attachment; filename="vouchers.csv"'
88 return r
89
90
91 class VoucherTags(EventPermissionRequiredMixin, TemplateView):
92 template_name = 'pretixcontrol/vouchers/tags.html'
93 permission = 'can_change_vouchers'
94
95 def get_context_data(self, **kwargs):
96 ctx = super().get_context_data(**kwargs)
97
98 tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(
99 total=Count('id'),
100 redeemed=Sum('redeemed')
101 )
102 for t in tags:
103 t['percentage'] = int((t['redeemed'] / t['total']) * 100)
104
105 ctx['tags'] = tags
106 return ctx
107
108
109 class VoucherDelete(EventPermissionRequiredMixin, DeleteView):
110 model = Voucher
111 template_name = 'pretixcontrol/vouchers/delete.html'
112 permission = 'can_change_vouchers'
113 context_object_name = 'voucher'
114
115 def get_object(self, queryset=None) -> Voucher:
116 try:
117 return self.request.event.vouchers.get(
118 id=self.kwargs['voucher']
119 )
120 except Voucher.DoesNotExist:
121 raise Http404(_("The requested voucher does not exist."))
122
123 def get(self, request, *args, **kwargs):
124 if self.get_object().redeemed:
125 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
126 return HttpResponseRedirect(self.get_success_url())
127 return super().get(request, *args, **kwargs)
128
129 @transaction.atomic
130 def delete(self, request, *args, **kwargs):
131 self.object = self.get_object()
132 success_url = self.get_success_url()
133
134 if self.object.redeemed:
135 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
136 else:
137 self.object.log_action('pretix.voucher.deleted', user=self.request.user)
138 self.object.delete()
139 messages.success(request, _('The selected voucher has been deleted.'))
140 return HttpResponseRedirect(success_url)
141
142 def get_success_url(self) -> str:
143 return reverse('control:event.vouchers', kwargs={
144 'organizer': self.request.event.organizer.slug,
145 'event': self.request.event.slug,
146 })
147
148
149 class VoucherUpdate(EventPermissionRequiredMixin, UpdateView):
150 model = Voucher
151 template_name = 'pretixcontrol/vouchers/detail.html'
152 permission = 'can_change_vouchers'
153 context_object_name = 'voucher'
154
155 def get_form_class(self):
156 form_class = VoucherForm
157 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
158 if response:
159 form_class = response
160 return form_class
161
162 def get_object(self, queryset=None) -> VoucherForm:
163 url = resolve(self.request.path_info)
164 try:
165 return self.request.event.vouchers.get(
166 id=url.kwargs['voucher']
167 )
168 except Voucher.DoesNotExist:
169 raise Http404(_("The requested voucher does not exist."))
170
171 @transaction.atomic
172 def form_valid(self, form):
173 messages.success(self.request, _('Your changes have been saved.'))
174 if form.has_changed():
175 self.object.log_action(
176 'pretix.voucher.changed', user=self.request.user, data={
177 k: form.cleaned_data.get(k) for k in form.changed_data
178 }
179 )
180 return super().form_valid(form)
181
182 def get_success_url(self) -> str:
183 return reverse('control:event.vouchers', kwargs={
184 'organizer': self.request.event.organizer.slug,
185 'event': self.request.event.slug,
186 })
187
188
189 class VoucherCreate(EventPermissionRequiredMixin, CreateView):
190 model = Voucher
191 template_name = 'pretixcontrol/vouchers/detail.html'
192 permission = 'can_change_vouchers'
193 context_object_name = 'voucher'
194
195 def get_form_class(self):
196 form_class = VoucherForm
197 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
198 if response:
199 form_class = response
200 return form_class
201
202 def get_success_url(self) -> str:
203 return reverse('control:event.vouchers', kwargs={
204 'organizer': self.request.event.organizer.slug,
205 'event': self.request.event.slug,
206 })
207
208 def get_form_kwargs(self):
209 kwargs = super().get_form_kwargs()
210 kwargs['instance'] = Voucher(event=self.request.event)
211 return kwargs
212
213 @transaction.atomic
214 def form_valid(self, form):
215 form.instance.event = self.request.event
216 messages.success(self.request, _('The new voucher has been created.'))
217 ret = super().form_valid(form)
218 form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)
219 return ret
220
221 def post(self, request, *args, **kwargs):
222 # TODO: Transform this into an asynchronous call?
223 with request.event.lock():
224 return super().post(request, *args, **kwargs)
225
226
227 class VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):
228 model = Voucher
229 template_name = 'pretixcontrol/vouchers/bulk.html'
230 permission = 'can_change_vouchers'
231 context_object_name = 'voucher'
232
233 def get_success_url(self) -> str:
234 return reverse('control:event.vouchers', kwargs={
235 'organizer': self.request.event.organizer.slug,
236 'event': self.request.event.slug,
237 })
238
239 def get_form_kwargs(self):
240 kwargs = super().get_form_kwargs()
241 kwargs['instance'] = Voucher(event=self.request.event)
242 return kwargs
243
244 @transaction.atomic
245 def form_valid(self, form):
246 for o in form.save(self.request.event):
247 o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)
248 messages.success(self.request, _('The new vouchers have been created.'))
249 return HttpResponseRedirect(self.get_success_url())
250
251 def get_form_class(self):
252 form_class = VoucherBulkForm
253 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
254 if response:
255 form_class = response
256 return form_class
257
258 def get_context_data(self, **kwargs):
259 ctx = super().get_context_data(**kwargs)
260 ctx['code_length'] = settings.ENTROPY['voucher_code']
261 return ctx
262
263 def post(self, request, *args, **kwargs):
264 # TODO: Transform this into an asynchronous call?
265 with request.event.lock():
266 return super().post(request, *args, **kwargs)
267
268
269 class VoucherRNG(EventPermissionRequiredMixin, View):
270 template_name = 'pretixcontrol/vouchers/bulk.html'
271 permission = 'can_change_vouchers'
272
273 def get(self, request, *args, **kwargs):
274 codes = set()
275 try:
276 num = int(request.GET.get('num', '5'))
277 except ValueError:
278 return HttpResponseBadRequest()
279
280 while len(codes) < num:
281 new_codes = set()
282 for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER
283 new_codes.add(_generate_random_code())
284 new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])
285 codes |= new_codes
286
287 return JsonResponse({
288 'codes': list(codes)
289 })
290
291 def get_success_url(self) -> str:
292 return reverse('control:event.vouchers', kwargs={
293 'organizer': self.request.event.organizer.slug,
294 'event': self.request.event.slug,
295 })
296
[end of src/pretix/control/views/vouchers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py
--- a/src/pretix/control/views/vouchers.py
+++ b/src/pretix/control/views/vouchers.py
@@ -37,7 +37,7 @@
qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))
if self.request.GET.get("tag", "") != "":
s = self.request.GET.get("tag", "")
- qs = qs.filter(tag=s)
+ qs = qs.filter(tag__icontains=s)
if self.request.GET.get("status", "") != "":
s = self.request.GET.get("status", "")
if s == 'v':
| {"golden_diff": "diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py\n--- a/src/pretix/control/views/vouchers.py\n+++ b/src/pretix/control/views/vouchers.py\n@@ -37,7 +37,7 @@\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n- qs = qs.filter(tag=s)\n+ qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n", "issue": "Substring search when matching voucher tags\nI might use a tagging structure such as \"DE-Berlin\", \"DE-Stuttgart\" for vouchers, and it'd be neat to match all \"DE-\" tagged vouchers at once.\n\n", "before_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Count, Q, Sum\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_change_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\")\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=False)\n elif s == 'r':\n qs = qs.filter(redeemed=True)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=False)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") == \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),\n _('Price'), _('Tag'), _('Redeemed')\n ]\n writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else 
_(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n str(v.price) if v.price else \"\",\n v.tag,\n _(\"Yes\") if v.redeemed else _(\"No\"),\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_change_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 
'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created.'))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError:\n return HttpResponseBadRequest()\n\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code())\n new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}]} | 3,765 | 158 |
gh_patches_debug_39346 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2376 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing primary email address should change email address used for list subscriptions
## What kind of an issue is this?
- Bug report
## What is the expected behaviour?
If I change my primary email address I expect the mailing list services to be updated with my new address, whilst removing my old address.
## What is the current behaviour?
The old address is not removed and the new address is not added.
## How do you reproduce this problem?
* (Add a new email to an account) (_Only required if there is no alternative email added to the account already_)
* Set the new email address as the primary
* _Notice that the action happens immediately_
* (Vs. unsubscribing / subscribing to a list, which queries the mailing list service to add/remove addresses.)
* Verify that the old primary address is still on the list
* Verify that the new primary address is absent from the list
## Other information
The settings page tells the user that the primary address is the one used for mailing lists:
> **Adressen som er markert som primær brukes til alle våre e-postlister**, gjenoppretning av passord og andre varsler som sendes fra oss. _(English: **The address marked as primary is used for all our mailing lists**, password recovery, and other notifications we send.)_
The [`set_primary`](https://github.com/dotkom/onlineweb4/blob/v4.24.28/apps/profiles/views.py#L326) function does not handle subscribing/unsubscribing to a list. (Neither does the [`Email model`](https://github.com/dotkom/onlineweb4/blob/v4.24.28/apps/authentication/models.py#L278) nor any signals attached to it.)
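
One possible shape for a hook that would handle it is a `pre_save` receiver on `Email` that compares the stored row with the incoming one and queues the task already used by the infomail/jobmail toggles. The sketch below is an assumption about how this could look, not code that exists in the repository; the receiver name and the hard-coded list names `info`/`oppdrag` are taken from the existing syncer configuration:

```
# Sketch only: names and call signature mirror the existing mail syncer.
from django.db.models.signals import pre_save
from django.dispatch import receiver

from apps.authentication.models import Email
from apps.gsuite.mail_syncer.tasks import update_mailing_list


@receiver(pre_save, sender=Email)
def resubscribe_on_primary_change(sender, instance, **kwargs):
    if not instance.pk:
        return  # brand-new address, nothing to swap yet
    stored = Email.objects.get(pk=instance.pk)
    if instance.primary == stored.primary:
        return  # the primary flag did not change
    added = instance.primary  # True when promoted, False when demoted
    user = instance.user
    if user.infomail:
        update_mailing_list.delay('info', email=instance.email, added=added)
    if user.jobmail:
        update_mailing_list.delay('oppdrag', email=instance.email, added=added)
```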
</issue>
<code>
[start of onlineweb4/settings/gsuite.py]
1 import os
2
3 from decouple import config
4
5 from .base import PROJECT_ROOT_DIRECTORY
6
7 OW4_GSUITE_CREDENTIALS_FILENAME = config('OW4_GSUITE_CREDENTIALS_FILENAME', default='gsuitecredentials.json')
8 OW4_GSUITE_CREDENTIALS_PATH = config('OW4_GSUITE_CREDENTIALS_PATH',
9 default=os.path.join(PROJECT_ROOT_DIRECTORY, OW4_GSUITE_CREDENTIALS_FILENAME))
10
11 OW4_GSUITE_SETTINGS = {
12 'CREDENTIALS': OW4_GSUITE_CREDENTIALS_PATH,
13 'DOMAIN': config('OW4_GSUITE_SYNC_DOMAIN', default='online.ntnu.no'),
14 # DELEGATED_ACCOUNT: G Suite Account with proper permissions to perform insertions and removals.
15 'DELEGATED_ACCOUNT': config('OW4_GSUITE_DELEGATED_ACCOUNT', default=''),
16 'ENABLED': config('OW4_GSUITE_ENABLED', cast=bool, default=False),
17 }
18
19 OW4_GSUITE_ACCOUNTS = {
20 'ENABLED': config('OW4_GSUITE_ACCOUNTS_ENABLED', cast=bool, default=False),
21 'ENABLE_INSERT': config('OW4_GSUITE_ACCOUNTS_ENABLE_INSERT', cast=bool, default=False),
22 }
23
24 OW4_GSUITE_SYNC = {
25 'CREDENTIALS': OW4_GSUITE_SETTINGS.get('CREDENTIALS'),
26 'DOMAIN': OW4_GSUITE_SETTINGS.get('DOMAIN'),
27 'DELEGATED_ACCOUNT': OW4_GSUITE_SETTINGS.get('DELEGATED_ACCOUNT'),
28 'ENABLED': config('OW4_GSUITE_SYNC_ENABLED', cast=bool, default=False),
29 'ENABLE_INSERT': config('OW4_GSUITE_SYNC_ENABLE_INSERT', cast=bool, default=False),
30 'ENABLE_DELETE': config('OW4_GSUITE_SYNC_ENABLE_DELETE', cast=bool, default=False),
31 # OW4 name (lowercase) -> G Suite name (lowercase)
32 'GROUPS': {
33 'appkom': 'appkom',
34 'arrkom': 'arrkom',
35 'bankom': 'bankom',
36 'bedkom': 'bedkom',
37 'dotkom': 'dotkom',
38 'ekskom': 'ekskom',
39 'fagkom': 'fagkom',
40 'fond': 'fond',
41 'hovedstyret': 'hovedstyret',
42 'jubkom': 'jubkom',
43 'prokom': 'prokom',
44 'seniorkom': 'seniorkom',
45 'trikom': 'trikom',
46 'tillitsvalgte': 'tillitsvalgte',
47 'redaksjonen': 'redaksjonen',
48 'ekskom': 'ekskom',
49 'itex' : 'itex',
50 'velkom': 'velkom',
51 'interessegrupper': 'interessegrupper',
52 'online-il': 'online-il',
53 'techtalks' : 'techtalks'
54 }
55 }
56
[end of onlineweb4/settings/gsuite.py]
[start of apps/authentication/signals.py]
1 # -*- coding: utf-8 -*-
2 import logging
3 import uuid
4
5 from django.conf import settings
6 from django.contrib.auth import get_user_model
7 from django.contrib.auth.models import Group
8 from django.db.models.signals import m2m_changed, post_save, pre_delete
9 from django.dispatch import receiver
10
11 from apps.authentication.models import GroupMember, OnlineGroup
12 from apps.authentication.tasks import SynchronizeGroups
13 from apps.gsuite.mail_syncer.main import update_g_suite_group, update_g_suite_user
14
15 User = get_user_model()
16 logger = logging.getLogger("syncer.%s" % __name__)
17 sync_uuid = uuid.uuid1()
18
19
20 def run_group_syncer(user):
21 """
22 Tasks to run after User is changed.
23 :param user: The user instance to sync groups for.
24 :type user: OnlineUser
25 :return: None
26 """
27 SynchronizeGroups.run()
28 if settings.OW4_GSUITE_SYNC.get("ENABLED", False):
29 ow4_gsuite_domain = settings.OW4_GSUITE_SYNC.get("DOMAIN")
30 if isinstance(user, User):
31 logger.debug("Running G Suite syncer for user {}".format(user))
32 update_g_suite_user(ow4_gsuite_domain, user, suppress_http_errors=True)
33 elif isinstance(user, Group):
34 group = user
35 logger.debug("Running G Suite syncer for group {}".format(group))
36 update_g_suite_group(
37 ow4_gsuite_domain, group.name, suppress_http_errors=True
38 )
39
40
41 @receiver(post_save, sender=Group)
42 def trigger_group_syncer(sender, instance, created=False, **kwargs):
43 """
44 :param sender: The model that triggered this hook
45 :param instance: The model instance triggering this hook
46 :param created: True if the instance was created, False if the instance was updated
47
48 Calls the SynchronizeGroups Task if a group is updated. (Not if it's the initial creation of a group)
49 """
50 global sync_uuid
51
52 if created:
53 # If a new instance is created, we do not need to trigger group sync.
54 pass
55 else:
56 # If sync is triggered by adding a user to group or a group to a user
57 # then we need to detach the signal hook listening to m2m changes on
58 # those models as they will trigger a recursive call to this method.
59 if sender == User.groups.through:
60 logger.debug(
61 "Disconnect m2m_changed signal hook with uuid %s before synchronizing groups"
62 % sync_uuid
63 )
64 if m2m_changed.disconnect(sender=sender, dispatch_uid=sync_uuid):
65 logger.debug("Signal with uuid %s disconnected" % sync_uuid)
66 run_group_syncer(instance)
67
68 sync_uuid = uuid.uuid1()
69 logger.debug(
70 "m2m_changed signal hook reconnected with uuid: %s" % sync_uuid
71 )
72 m2m_changed.connect(
73 receiver=trigger_group_syncer,
74 dispatch_uid=sync_uuid,
75 sender=User.groups.through,
76 )
77 else:
78 run_group_syncer(instance)
79
80
81 m2m_changed.connect(
82 trigger_group_syncer, dispatch_uid=sync_uuid, sender=User.groups.through
83 )
84
85
86 @receiver(post_save, sender=GroupMember)
87 def add_online_group_member_to_django_group(
88 sender, instance: GroupMember, created=False, **kwargs
89 ):
90 online_group: OnlineGroup = instance.group
91 group: Group = online_group.group
92 user: User = instance.user
93 if user not in group.user_set.all():
94 group.user_set.add(user)
95
96
97 @receiver(pre_delete, sender=GroupMember)
98 def remove_online_group_members_from_django_group(
99 sender, instance: GroupMember, **kwargs
100 ):
101 online_group: OnlineGroup = instance.group
102 group: Group = online_group.group
103 user: User = instance.user
104 if user in group.user_set.all():
105 group.user_set.remove(user)
106
[end of apps/authentication/signals.py]
[start of apps/gsuite/mail_syncer/signals.py]
1 import logging
2
3 from django.contrib.auth import get_user_model
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6
7 from .tasks import update_mailing_list
8
9 User = get_user_model()
10
11 MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {"infomail": "info", "jobmail": "oppdrag"}
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_updated_mailing_list_fields(user):
17 updated_mailing_lists = []
18 try:
19 # Get the current user and find out what's about to change
20 current_user = User.objects.get(pk=user.pk)
21 if user.infomail != current_user.infomail:
22 updated_mailing_lists.append("infomail")
23 if user.jobmail != current_user.jobmail:
24 updated_mailing_lists.append("jobmail")
25 except User.DoesNotExist:
26 # Find out which mailing lists are opted into if the user did not previously exist
27 for mailing_list in MAILING_LIST_USER_FIELDS_TO_LIST_NAME.keys():
28 if getattr(user, mailing_list, False):
29 updated_mailing_lists.append(mailing_list)
30
31 return updated_mailing_lists
32
33
34 @receiver(pre_save, sender=User)
35 def toggle_mailing_lists(sender, instance, **kwargs):
36 update_fields = get_updated_mailing_list_fields(instance)
37
38 if update_fields:
39 for mailing_list in MAILING_LIST_USER_FIELDS_TO_LIST_NAME.keys():
40 if mailing_list not in update_fields:
41 # Skips toggle if mailing list field not changed.
42 continue
43
44 g_suite_mailing_list = MAILING_LIST_USER_FIELDS_TO_LIST_NAME[mailing_list]
45
46 update_mailing_list.delay(
47 g_suite_mailing_list,
48 instance.primary_email,
49 getattr(instance, mailing_list),
50 )
51
[end of apps/gsuite/mail_syncer/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/signals.py b/apps/authentication/signals.py
--- a/apps/authentication/signals.py
+++ b/apps/authentication/signals.py
@@ -5,17 +5,20 @@
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
-from django.db.models.signals import m2m_changed, post_save, pre_delete
+from django.db.models.signals import m2m_changed, post_save, pre_delete, pre_save
from django.dispatch import receiver
-from apps.authentication.models import GroupMember, OnlineGroup
+from apps.authentication.models import Email, GroupMember, OnlineGroup
from apps.authentication.tasks import SynchronizeGroups
from apps.gsuite.mail_syncer.main import update_g_suite_group, update_g_suite_user
+from apps.gsuite.mail_syncer.tasks import update_mailing_list
User = get_user_model()
logger = logging.getLogger("syncer.%s" % __name__)
sync_uuid = uuid.uuid1()
+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = settings.MAILING_LIST_USER_FIELDS_TO_LIST_NAME
+
def run_group_syncer(user):
"""
@@ -103,3 +106,25 @@
user: User = instance.user
if user in group.user_set.all():
group.user_set.remove(user)
+
+
+@receiver(pre_save, sender=Email)
+def re_subscribe_primary_email_to_lists(sender, instance: Email, **kwargs):
+ user: User = instance.user
+ jobmail = MAILING_LIST_USER_FIELDS_TO_LIST_NAME.get("jobmail")
+ infomail = MAILING_LIST_USER_FIELDS_TO_LIST_NAME.get("infomail")
+ if instance.pk:
+ stored_instance: Email = Email.objects.get(pk=instance.pk)
+
+ # Handle case when the instance is changed to primary
+ if instance.primary and not stored_instance.primary:
+ if user.jobmail:
+ update_mailing_list.delay(jobmail, email=instance.email, added=True)
+ if user.infomail:
+ update_mailing_list.delay(infomail, email=instance.email, added=True)
+ # Handle case when the instance is changed from primary
+ elif not instance.primary and stored_instance.primary:
+ if user.jobmail:
+ update_mailing_list.delay(jobmail, email=instance.email, added=False)
+ if user.infomail:
+ update_mailing_list.delay(infomail, email=instance.email, added=False)
diff --git a/apps/gsuite/mail_syncer/signals.py b/apps/gsuite/mail_syncer/signals.py
--- a/apps/gsuite/mail_syncer/signals.py
+++ b/apps/gsuite/mail_syncer/signals.py
@@ -1,5 +1,6 @@
import logging
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import pre_save
from django.dispatch import receiver
@@ -8,7 +9,7 @@
User = get_user_model()
-MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {"infomail": "info", "jobmail": "oppdrag"}
+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = settings.MAILING_LIST_USER_FIELDS_TO_LIST_NAME
logger = logging.getLogger(__name__)
diff --git a/onlineweb4/settings/gsuite.py b/onlineweb4/settings/gsuite.py
--- a/onlineweb4/settings/gsuite.py
+++ b/onlineweb4/settings/gsuite.py
@@ -53,3 +53,8 @@
'techtalks' : 'techtalks'
}
}
+
+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {
+ 'infomail': 'info',
+ 'jobmail': 'oppdrag',
+}
| {"golden_diff": "diff --git a/apps/authentication/signals.py b/apps/authentication/signals.py\n--- a/apps/authentication/signals.py\n+++ b/apps/authentication/signals.py\n@@ -5,17 +5,20 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.contrib.auth.models import Group\n-from django.db.models.signals import m2m_changed, post_save, pre_delete\n+from django.db.models.signals import m2m_changed, post_save, pre_delete, pre_save\n from django.dispatch import receiver\n \n-from apps.authentication.models import GroupMember, OnlineGroup\n+from apps.authentication.models import Email, GroupMember, OnlineGroup\n from apps.authentication.tasks import SynchronizeGroups\n from apps.gsuite.mail_syncer.main import update_g_suite_group, update_g_suite_user\n+from apps.gsuite.mail_syncer.tasks import update_mailing_list\n \n User = get_user_model()\n logger = logging.getLogger(\"syncer.%s\" % __name__)\n sync_uuid = uuid.uuid1()\n \n+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = settings.MAILING_LIST_USER_FIELDS_TO_LIST_NAME\n+\n \n def run_group_syncer(user):\n \"\"\"\n@@ -103,3 +106,25 @@\n user: User = instance.user\n if user in group.user_set.all():\n group.user_set.remove(user)\n+\n+\n+@receiver(pre_save, sender=Email)\n+def re_subscribe_primary_email_to_lists(sender, instance: Email, **kwargs):\n+ user: User = instance.user\n+ jobmail = MAILING_LIST_USER_FIELDS_TO_LIST_NAME.get(\"jobmail\")\n+ infomail = MAILING_LIST_USER_FIELDS_TO_LIST_NAME.get(\"infomail\")\n+ if instance.pk:\n+ stored_instance: Email = Email.objects.get(pk=instance.pk)\n+\n+ # Handle case when the instance is changed to primary\n+ if instance.primary and not stored_instance.primary:\n+ if user.jobmail:\n+ update_mailing_list.delay(jobmail, email=instance.email, added=True)\n+ if user.infomail:\n+ update_mailing_list.delay(infomail, email=instance.email, added=True)\n+ # Handle case when the instance is changed from primary\n+ elif not instance.primary and stored_instance.primary:\n+ if user.jobmail:\n+ update_mailing_list.delay(jobmail, email=instance.email, added=False)\n+ if user.infomail:\n+ update_mailing_list.delay(infomail, email=instance.email, added=False)\ndiff --git a/apps/gsuite/mail_syncer/signals.py b/apps/gsuite/mail_syncer/signals.py\n--- a/apps/gsuite/mail_syncer/signals.py\n+++ b/apps/gsuite/mail_syncer/signals.py\n@@ -1,5 +1,6 @@\n import logging\n \n+from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db.models.signals import pre_save\n from django.dispatch import receiver\n@@ -8,7 +9,7 @@\n \n User = get_user_model()\n \n-MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {\"infomail\": \"info\", \"jobmail\": \"oppdrag\"}\n+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = settings.MAILING_LIST_USER_FIELDS_TO_LIST_NAME\n \n logger = logging.getLogger(__name__)\n \ndiff --git a/onlineweb4/settings/gsuite.py b/onlineweb4/settings/gsuite.py\n--- a/onlineweb4/settings/gsuite.py\n+++ b/onlineweb4/settings/gsuite.py\n@@ -53,3 +53,8 @@\n 'techtalks' : 'techtalks'\n }\n }\n+\n+MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {\n+ 'infomail': 'info',\n+ 'jobmail': 'oppdrag',\n+}\n", "issue": "Changing primary email address should change email address used for list subscriptions\n## What kind of an issue is this?\r\n\r\n- Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\n\r\nIf I change my primary email address I expect the mailing list services to be updated with my new address, whilst removing my old address.\r\n\r\n## What is the current 
behaviour?\r\n\r\nThe old address is not removed and the new address is not added.\r\n\r\n\r\n## How do you reproduce this problem? \r\n\r\n* (Add a new email to an account) (_Only required if there is no alternative email added to the account already_)\r\n* Set the new email address as the primary\r\n* _Notice that the action happens immediately_\r\n * (Vs. unsubscribing / subscribing to a list, which queries the mailing list service to add/remove addresses.)\r\n* Verify that the old primary address is still on the list\r\n* Verify that the new primary address is absent from the list\r\n\r\n## Other information\r\n\r\nThe settings page tells the user that the primary address is the one used for mailing lists:\r\n\r\n> **Adressen som er markert som prim\u00e6r brukes til alle v\u00e5re e-postlister**, gjenoppretning av passord og andre varsler som sendes fra oss.\r\n\r\nThe [`set_primary`](https://github.com/dotkom/onlineweb4/blob/v4.24.28/apps/profiles/views.py#L326) function does not handle subscribing/unsubscribing to a list. (Neither does the [`Email model`](https://github.com/dotkom/onlineweb4/blob/v4.24.28/apps/authentication/models.py#L278) nor any signals attached to it.)\r\n\n", "before_files": [{"content": "import os\n\nfrom decouple import config\n\nfrom .base import PROJECT_ROOT_DIRECTORY\n\nOW4_GSUITE_CREDENTIALS_FILENAME = config('OW4_GSUITE_CREDENTIALS_FILENAME', default='gsuitecredentials.json')\nOW4_GSUITE_CREDENTIALS_PATH = config('OW4_GSUITE_CREDENTIALS_PATH',\n default=os.path.join(PROJECT_ROOT_DIRECTORY, OW4_GSUITE_CREDENTIALS_FILENAME))\n\nOW4_GSUITE_SETTINGS = {\n 'CREDENTIALS': OW4_GSUITE_CREDENTIALS_PATH,\n 'DOMAIN': config('OW4_GSUITE_SYNC_DOMAIN', default='online.ntnu.no'),\n # DELEGATED_ACCOUNT: G Suite Account with proper permissions to perform insertions and removals.\n 'DELEGATED_ACCOUNT': config('OW4_GSUITE_DELEGATED_ACCOUNT', default=''),\n 'ENABLED': config('OW4_GSUITE_ENABLED', cast=bool, default=False),\n}\n\nOW4_GSUITE_ACCOUNTS = {\n 'ENABLED': config('OW4_GSUITE_ACCOUNTS_ENABLED', cast=bool, default=False),\n 'ENABLE_INSERT': config('OW4_GSUITE_ACCOUNTS_ENABLE_INSERT', cast=bool, default=False),\n}\n\nOW4_GSUITE_SYNC = {\n 'CREDENTIALS': OW4_GSUITE_SETTINGS.get('CREDENTIALS'),\n 'DOMAIN': OW4_GSUITE_SETTINGS.get('DOMAIN'),\n 'DELEGATED_ACCOUNT': OW4_GSUITE_SETTINGS.get('DELEGATED_ACCOUNT'),\n 'ENABLED': config('OW4_GSUITE_SYNC_ENABLED', cast=bool, default=False),\n 'ENABLE_INSERT': config('OW4_GSUITE_SYNC_ENABLE_INSERT', cast=bool, default=False),\n 'ENABLE_DELETE': config('OW4_GSUITE_SYNC_ENABLE_DELETE', cast=bool, default=False),\n # OW4 name (lowercase) -> G Suite name (lowercase)\n 'GROUPS': {\n 'appkom': 'appkom',\n 'arrkom': 'arrkom',\n 'bankom': 'bankom',\n 'bedkom': 'bedkom',\n 'dotkom': 'dotkom',\n 'ekskom': 'ekskom',\n 'fagkom': 'fagkom',\n 'fond': 'fond',\n 'hovedstyret': 'hovedstyret',\n 'jubkom': 'jubkom',\n 'prokom': 'prokom',\n 'seniorkom': 'seniorkom',\n 'trikom': 'trikom',\n 'tillitsvalgte': 'tillitsvalgte',\n 'redaksjonen': 'redaksjonen',\n 'ekskom': 'ekskom',\n 'itex' : 'itex',\n 'velkom': 'velkom',\n 'interessegrupper': 'interessegrupper',\n 'online-il': 'online-il',\n 'techtalks' : 'techtalks'\n }\n}\n", "path": "onlineweb4/settings/gsuite.py"}, {"content": "# -*- coding: utf-8 -*-\nimport logging\nimport uuid\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.db.models.signals import m2m_changed, post_save, pre_delete\nfrom django.dispatch 
import receiver\n\nfrom apps.authentication.models import GroupMember, OnlineGroup\nfrom apps.authentication.tasks import SynchronizeGroups\nfrom apps.gsuite.mail_syncer.main import update_g_suite_group, update_g_suite_user\n\nUser = get_user_model()\nlogger = logging.getLogger(\"syncer.%s\" % __name__)\nsync_uuid = uuid.uuid1()\n\n\ndef run_group_syncer(user):\n \"\"\"\n Tasks to run after User is changed.\n :param user: The user instance to sync groups for.\n :type user: OnlineUser\n :return: None\n \"\"\"\n SynchronizeGroups.run()\n if settings.OW4_GSUITE_SYNC.get(\"ENABLED\", False):\n ow4_gsuite_domain = settings.OW4_GSUITE_SYNC.get(\"DOMAIN\")\n if isinstance(user, User):\n logger.debug(\"Running G Suite syncer for user {}\".format(user))\n update_g_suite_user(ow4_gsuite_domain, user, suppress_http_errors=True)\n elif isinstance(user, Group):\n group = user\n logger.debug(\"Running G Suite syncer for group {}\".format(group))\n update_g_suite_group(\n ow4_gsuite_domain, group.name, suppress_http_errors=True\n )\n\n\n@receiver(post_save, sender=Group)\ndef trigger_group_syncer(sender, instance, created=False, **kwargs):\n \"\"\"\n :param sender: The model that triggered this hook\n :param instance: The model instance triggering this hook\n :param created: True if the instance was created, False if the instance was updated\n\n Calls the SynchronizeGroups Task if a group is updated. (Not if it's the initial creation of a group)\n \"\"\"\n global sync_uuid\n\n if created:\n # If a new instance is created, we do not need to trigger group sync.\n pass\n else:\n # If sync is triggered by adding a user to group or a group to a user\n # then we need to detach the signal hook listening to m2m changes on\n # those models as they will trigger a recursive call to this method.\n if sender == User.groups.through:\n logger.debug(\n \"Disconnect m2m_changed signal hook with uuid %s before synchronizing groups\"\n % sync_uuid\n )\n if m2m_changed.disconnect(sender=sender, dispatch_uid=sync_uuid):\n logger.debug(\"Signal with uuid %s disconnected\" % sync_uuid)\n run_group_syncer(instance)\n\n sync_uuid = uuid.uuid1()\n logger.debug(\n \"m2m_changed signal hook reconnected with uuid: %s\" % sync_uuid\n )\n m2m_changed.connect(\n receiver=trigger_group_syncer,\n dispatch_uid=sync_uuid,\n sender=User.groups.through,\n )\n else:\n run_group_syncer(instance)\n\n\nm2m_changed.connect(\n trigger_group_syncer, dispatch_uid=sync_uuid, sender=User.groups.through\n)\n\n\n@receiver(post_save, sender=GroupMember)\ndef add_online_group_member_to_django_group(\n sender, instance: GroupMember, created=False, **kwargs\n):\n online_group: OnlineGroup = instance.group\n group: Group = online_group.group\n user: User = instance.user\n if user not in group.user_set.all():\n group.user_set.add(user)\n\n\n@receiver(pre_delete, sender=GroupMember)\ndef remove_online_group_members_from_django_group(\n sender, instance: GroupMember, **kwargs\n):\n online_group: OnlineGroup = instance.group\n group: Group = online_group.group\n user: User = instance.user\n if user in group.user_set.all():\n group.user_set.remove(user)\n", "path": "apps/authentication/signals.py"}, {"content": "import logging\n\nfrom django.contrib.auth import get_user_model\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom .tasks import update_mailing_list\n\nUser = get_user_model()\n\nMAILING_LIST_USER_FIELDS_TO_LIST_NAME = {\"infomail\": \"info\", \"jobmail\": \"oppdrag\"}\n\nlogger = 
logging.getLogger(__name__)\n\n\ndef get_updated_mailing_list_fields(user):\n updated_mailing_lists = []\n try:\n # Get the current user and find out what's about to change\n current_user = User.objects.get(pk=user.pk)\n if user.infomail != current_user.infomail:\n updated_mailing_lists.append(\"infomail\")\n if user.jobmail != current_user.jobmail:\n updated_mailing_lists.append(\"jobmail\")\n except User.DoesNotExist:\n # Find out which mailing lists are opted into if the user did not previously exist\n for mailing_list in MAILING_LIST_USER_FIELDS_TO_LIST_NAME.keys():\n if getattr(user, mailing_list, False):\n updated_mailing_lists.append(mailing_list)\n\n return updated_mailing_lists\n\n\n@receiver(pre_save, sender=User)\ndef toggle_mailing_lists(sender, instance, **kwargs):\n update_fields = get_updated_mailing_list_fields(instance)\n\n if update_fields:\n for mailing_list in MAILING_LIST_USER_FIELDS_TO_LIST_NAME.keys():\n if mailing_list not in update_fields:\n # Skips toggle if mailing list field not changed.\n continue\n\n g_suite_mailing_list = MAILING_LIST_USER_FIELDS_TO_LIST_NAME[mailing_list]\n\n update_mailing_list.delay(\n g_suite_mailing_list,\n instance.primary_email,\n getattr(instance, mailing_list),\n )\n", "path": "apps/gsuite/mail_syncer/signals.py"}]} | 3,178 | 806 |
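Editor's note on the record above (the onlineweb4 mailing-list issue): the quoted `toggle_mailing_lists` signal only reacts to changes of the `infomail`/`jobmail` flags on the user, so switching the primary address never reaches the G Suite lists. The sketch below shows the kind of handler that would close that gap. It is only a hedged illustration: the `update_mailing_list` task signature and the field-to-list mapping come from the quoted `apps/gsuite/mail_syncer/signals.py`, while the `Email` model fields (`primary`, `email`, `user`) and the assumption that the old address is still the user's primary at `pre_save` time are guesses, not the project's accepted fix.

``` python
from django.db.models.signals import pre_save
from django.dispatch import receiver

from apps.authentication.models import Email  # model referenced in the issue (fields assumed)
from apps.gsuite.mail_syncer.tasks import update_mailing_list

# OW4 user flag -> G Suite list name, as in the quoted signals module
MAILING_LIST_USER_FIELDS_TO_LIST_NAME = {"infomail": "info", "jobmail": "oppdrag"}


@receiver(pre_save, sender=Email)
def sync_primary_address_change(sender, instance, **kwargs):
    """When an address becomes primary, swap it in on every list the user opted into."""
    if not instance.pk or not instance.primary:
        return
    previous = Email.objects.get(pk=instance.pk)
    if previous.primary:
        return  # the primary flag did not change, nothing to sync
    user = instance.user
    old_address = user.primary_email  # assumes the old primary is still flagged at this point
    for field, list_name in MAILING_LIST_USER_FIELDS_TO_LIST_NAME.items():
        if getattr(user, field, False):
            update_mailing_list.delay(list_name, old_address, False)    # unsubscribe old primary
            update_mailing_list.delay(list_name, instance.email, True)  # subscribe new primary
```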
gh_patches_debug_8601 | rasdani/github-patches | git_diff | getmoto__moto-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
botocore.exceptions.ConnectionClosedError raised when calling change_resource_record_sets (Boto3)
I am not sure whether or not I should expect this to work, but I see there are currently similar tests in moto against boto so I thought I would inquire.
When using the Route53 client from boto3, a call to the change_resource_record_sets method raises a botocore.exceptions.ConnectionClosedError. (botocore.exceptions.ConnectionClosedError: Connection was closed before we received a valid response from endpoint URL: "https://route53.amazonaws.com/2013-04-01/hostedzone/cc11c883/rrset/")
A test case to reproduce is below.
``` python
import boto3
import uuid
from moto import mock_route53
def guid():
return str(uuid.uuid4())
@mock_route53
def test_route53_rrset_fail():
client = boto3.client('route53')
# Create a new zone
zone_name = '{0}.com'.format(guid())
zone = client.create_hosted_zone(
Name=zone_name,
CallerReference=guid(),
HostedZoneConfig={'Comment': guid()}
)
zone_id = zone['HostedZone']['Id']
# Verify the zone is retrievable
z = client.get_hosted_zone(Id=zone_id)
assert z['HostedZone']['Id'] == zone_id
# Try to create a record set
# Raises botocore.exceptions.ConnectionClosedError
client.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Comment': guid(),
'Changes': [{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': 'foo.{0}'.format(zone_name),
'Type': 'A',
'ResourceRecords': [{'Value': '1.2.3.4'}]
}
}]
}
)
```
</issue>
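A likely root cause, judging from the endpoint URL quoted above and the `url_paths` mapping shown below: botocore sends the request to a path ending in `/rrset/` (with a trailing slash), while the registered pattern `'{0}hostedzone/[^/]+/rrset$'` only matches the path without one, so no mock handler picks the request up and the connection is closed unanswered. The snippet below is only an illustrative check of that suffix match — moto's real dispatch builds full-URL regexes from `url_bases` and `url_paths` — but it shows why making the trailing slash optional would help:

``` python
import re

OLD_SUFFIX = r"hostedzone/[^/]+/rrset$"
FIXED_SUFFIX = r"hostedzone/[^/]+/rrset/?$"
request_path = "/2013-04-01/hostedzone/cc11c883/rrset/"  # path from the error message

print(re.search(OLD_SUFFIX, request_path))    # None -> request falls through unmocked
print(re.search(FIXED_SUFFIX, request_path))  # a match -> rrset_response would handle it
```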
<code>
[start of moto/route53/urls.py]
1 from __future__ import unicode_literals
2 from . import responses
3
4 url_bases = [
5 "https://route53.amazonaws.com/201.-..-../",
6 ]
7
8 url_paths = {
9 '{0}hostedzone$': responses.list_or_create_hostzone_response,
10 '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
11 '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,
12 '{0}healthcheck': responses.health_check_response,
13 '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,
14 }
15
[end of moto/route53/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/moto/route53/urls.py b/moto/route53/urls.py
--- a/moto/route53/urls.py
+++ b/moto/route53/urls.py
@@ -8,7 +8,7 @@
url_paths = {
'{0}hostedzone$': responses.list_or_create_hostzone_response,
'{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,
- '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,
+ '{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response,
'{0}healthcheck': responses.health_check_response,
'{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,
}
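The accepted fix is the small change above: the `/?$` anchor makes the trailing slash optional, so both `.../rrset` and `.../rrset/` are routed to `rrset_response`. A minimal sanity check of the updated pattern (an illustrative sketch using plain `re`, not one of moto's own tests):

``` python
import re

FIXED_SUFFIX = r"hostedzone/[^/]+/rrset/?$"
for path in ("/2013-04-01/hostedzone/cc11c883/rrset",
             "/2013-04-01/hostedzone/cc11c883/rrset/"):
    assert re.search(FIXED_SUFFIX, path), path  # both variants now match
print("both path variants match")
```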
| {"golden_diff": "diff --git a/moto/route53/urls.py b/moto/route53/urls.py\n--- a/moto/route53/urls.py\n+++ b/moto/route53/urls.py\n@@ -8,7 +8,7 @@\n url_paths = {\n '{0}hostedzone$': responses.list_or_create_hostzone_response,\n '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,\n- '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,\n+ '{0}hostedzone/[^/]+/rrset/?$': responses.rrset_response,\n '{0}healthcheck': responses.health_check_response,\n '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,\n }\n", "issue": "botocore.exceptions.ConnectionClosedError raised when calling change_resource_record_sets (Boto3)\nI am not sure whether or not I should expect this to work, but I see there are currently similar tests in moto against boto so I thought I would inquire.\n\nWhen using the Route53 client from boto3, a call to the change_resource_record_set method raises a botocore.exceptions.ConnectionClosedError. (botocore.exceptions.ConnectionClosedError: Connection was closed before we received a valid response from endpoint URL: \"https://route53.amazonaws.com/2013-04-01/hostedzone/cc11c883/rrset/\")\n\nA test case to reproduce is below.\n\n``` python\nimport boto3\nimport uuid\nfrom moto import mock_route53\n\ndef guid():\n return str(uuid.uuid4())\n\n@mock_route53\ndef test_route53_rrset_fail():\n\n client = boto3.client('route53')\n\n # Create a new zone\n zone_name = '{0}.com'.format(guid())\n zone = client.create_hosted_zone(\n Name=zone_name,\n CallerReference=guid(),\n HostedZoneConfig={'Comment': guid()}\n )\n zone_id = zone['HostedZone']['Id']\n\n # Verify the zone is retrievable\n z = client.get_hosted_zone(Id=zone_id)\n assert z['HostedZone']['Id'] == zone_id\n\n # Try to create a record set\n # Raises botocore.exceptions.ConnectionClosedError\n client.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n 'Comment': guid(),\n 'Changes': [{\n 'Action': 'CREATE',\n 'ResourceRecordSet': {\n 'Name': 'foo.{0}'.format(zone_name),\n 'Type': 'A',\n 'ResourceRecords': [{'Value': '1.2.3.4'}]\n }\n }]\n }\n )\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom . import responses\n\nurl_bases = [\n \"https://route53.amazonaws.com/201.-..-../\",\n]\n\nurl_paths = {\n '{0}hostedzone$': responses.list_or_create_hostzone_response,\n '{0}hostedzone/[^/]+$': responses.get_or_delete_hostzone_response,\n '{0}hostedzone/[^/]+/rrset$': responses.rrset_response,\n '{0}healthcheck': responses.health_check_response,\n '{0}tags|trafficpolicyinstances/*': responses.not_implemented_response,\n}\n", "path": "moto/route53/urls.py"}]} | 1,106 | 176 |